git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge branches 'pm-core', 'pm-qos', 'pm-domains' and 'pm-opp'
author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Thu, 2 Mar 2017 23:34:44 +0000 (00:34 +0100)
committer: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Thu, 2 Mar 2017 23:34:44 +0000 (00:34 +0100)
* pm-core:
  PM / runtime: Fix some typos

* pm-qos:
  PM / QoS: Remove global notifiers

* pm-domains:
  PM / Domains: Power off masters immediately in the power off sequence
  PM / Domains: Rename is_async to one_dev_on for genpd_power_off()
  PM / Domains: Move genpd_power_off() above genpd_power_on()

* pm-opp:
  PM / OPP: Documentation: Fix opp-microvolt in examples
  PM / OPP: fix off-by-one bug in dev_pm_opp_get_max_volt_latency loop

1753 files changed:
CREDITS
Documentation/ABI/testing/sysfs-class-devfreq-event [new file with mode: 0644]
Documentation/ABI/testing/sysfs-class-led
Documentation/ABI/testing/sysfs-devices-edac
Documentation/ABI/testing/sysfs-kernel-iommu_groups
Documentation/DMA-attributes.txt
Documentation/RCU/Design/Data-Structures/Data-Structures.html
Documentation/RCU/Design/Expedited-Grace-Periods/ExpRCUFlow.svg [new file with mode: 0644]
Documentation/RCU/Design/Expedited-Grace-Periods/ExpSchedFlow.svg [new file with mode: 0644]
Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html [new file with mode: 0644]
Documentation/RCU/Design/Expedited-Grace-Periods/Funnel0.svg [new file with mode: 0644]
Documentation/RCU/Design/Expedited-Grace-Periods/Funnel1.svg [new file with mode: 0644]
Documentation/RCU/Design/Expedited-Grace-Periods/Funnel2.svg [new file with mode: 0644]
Documentation/RCU/Design/Expedited-Grace-Periods/Funnel3.svg [new file with mode: 0644]
Documentation/RCU/Design/Expedited-Grace-Periods/Funnel4.svg [new file with mode: 0644]
Documentation/RCU/Design/Expedited-Grace-Periods/Funnel5.svg [new file with mode: 0644]
Documentation/RCU/Design/Expedited-Grace-Periods/Funnel6.svg [new file with mode: 0644]
Documentation/RCU/Design/Expedited-Grace-Periods/Funnel7.svg [new file with mode: 0644]
Documentation/RCU/Design/Expedited-Grace-Periods/Funnel8.svg [new file with mode: 0644]
Documentation/RCU/Design/Requirements/Requirements.html
Documentation/RCU/trace.txt
Documentation/admin-guide/kernel-parameters.txt
Documentation/admin-guide/ras.rst
Documentation/cpu-freq/core.txt
Documentation/cpu-freq/cpu-drivers.txt
Documentation/cpu-freq/cpufreq-stats.txt
Documentation/cpu-freq/governors.txt
Documentation/cpu-freq/index.txt
Documentation/cpu-freq/intel-pstate.txt
Documentation/cpu-freq/user-guide.txt
Documentation/devicetree/bindings/arm/arch_timer.txt
Documentation/devicetree/bindings/cpufreq/ti-cpufreq.txt [new file with mode: 0644]
Documentation/devicetree/bindings/devfreq/exynos-bus.txt
Documentation/devicetree/bindings/hwmon/adc128d818.txt [new file with mode: 0644]
Documentation/devicetree/bindings/hwmon/lm70.txt
Documentation/devicetree/bindings/hwmon/lm90.txt
Documentation/devicetree/bindings/hwmon/sht15.txt [new file with mode: 0644]
Documentation/devicetree/bindings/hwmon/stts751.txt [new file with mode: 0644]
Documentation/devicetree/bindings/interrupt-controller/cortina,gemini-interrupt-controller.txt [new file with mode: 0644]
Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
Documentation/devicetree/bindings/leds/common.txt
Documentation/devicetree/bindings/mtd/aspeed-smc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mtd/common.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mtd/cortina,gemini-flash.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
Documentation/devicetree/bindings/mtd/mtk-quadspi.txt
Documentation/devicetree/bindings/net/mediatek-net.txt
Documentation/devicetree/bindings/net/phy.txt
Documentation/devicetree/bindings/power/supply/axp20x_ac_power.txt [new file with mode: 0644]
Documentation/devicetree/bindings/power/supply/axp20x_usb_power.txt
Documentation/devicetree/bindings/power/supply/bq27xxx.txt [new file with mode: 0644]
Documentation/devicetree/bindings/power/supply/qcom_smbb.txt
Documentation/devicetree/bindings/power/supply/sbs_sbs-charger.txt [new file with mode: 0644]
Documentation/devicetree/bindings/power/supply/ti,bq24735.txt
Documentation/devicetree/bindings/power_supply/maxim,max14656.txt [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/anatop-regulator.txt
Documentation/devicetree/bindings/regulator/cpcap-regulator.txt [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/gpio-regulator.txt
Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.txt
Documentation/devicetree/bindings/spi/spi-lantiq-ssc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/spi/spi-rockchip.txt
Documentation/devicetree/bindings/timer/cortina,gemini-timer.txt [new file with mode: 0644]
Documentation/devicetree/bindings/timer/renesas,ostm.txt [new file with mode: 0644]
Documentation/driver-model/devres.txt
Documentation/filesystems/proc.txt
Documentation/hwmon/hwmon-kernel-api.txt
Documentation/hwmon/lm70
Documentation/hwmon/sht21
Documentation/hwmon/sysfs-interface
Documentation/leds/leds-class.txt
Documentation/livepatch/livepatch.txt
Documentation/locking/ww-mutex-design.txt
Documentation/media/uapi/cec/cec-func-close.rst
Documentation/media/uapi/cec/cec-func-ioctl.rst
Documentation/media/uapi/cec/cec-func-open.rst
Documentation/media/uapi/cec/cec-func-poll.rst
Documentation/media/uapi/cec/cec-intro.rst
Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst
Documentation/media/uapi/cec/cec-ioc-dqevent.rst
Documentation/media/uapi/cec/cec-ioc-g-mode.rst
Documentation/media/uapi/cec/cec-ioc-receive.rst
Documentation/media/uapi/v4l/pixfmt-007.rst
Documentation/memory-barriers.txt
Documentation/mtd/intel-spi.txt [new file with mode: 0644]
Documentation/power/pm_qos_interface.txt
Documentation/power/runtime_pm.txt
Documentation/power/states.txt
Documentation/scheduler/sched-deadline.txt
Documentation/scheduler/sched-rt-group.txt
Documentation/spi/ep93xx_spi [deleted file]
Documentation/timers/timer_stats.txt [deleted file]
Documentation/x86/zero-page.txt
MAINTAINERS
Makefile
arch/alpha/include/asm/Kbuild
arch/alpha/kernel/osf_sys.c
arch/arc/include/asm/Kbuild
arch/arc/include/asm/delay.h
arch/arc/kernel/head.S
arch/arc/kernel/mcip.c
arch/arc/kernel/smp.c
arch/arc/kernel/unaligned.c
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/imx1.dtsi
arch/arm/boot/dts/imx23.dtsi
arch/arm/boot/dts/imx25.dtsi
arch/arm/boot/dts/imx27.dtsi
arch/arm/boot/dts/imx28.dtsi
arch/arm/boot/dts/imx31.dtsi
arch/arm/boot/dts/imx35.dtsi
arch/arm/boot/dts/imx50.dtsi
arch/arm/boot/dts/imx51.dtsi
arch/arm/boot/dts/imx53.dtsi
arch/arm/boot/dts/imx6dl.dtsi
arch/arm/boot/dts/imx6qdl.dtsi
arch/arm/boot/dts/imx6sl.dtsi
arch/arm/boot/dts/imx6sx.dtsi
arch/arm/boot/dts/imx6ul.dtsi
arch/arm/boot/dts/imx7s.dtsi
arch/arm/boot/dts/orion5x-linkstation-lschl.dts [moved from arch/arm/boot/dts/orion5x-lschl.dts with 98% similarity]
arch/arm/boot/dts/stih407-family.dtsi
arch/arm/configs/exynos_defconfig
arch/arm/configs/ezx_defconfig
arch/arm/configs/imote2_defconfig
arch/arm/configs/multi_v5_defconfig
arch/arm/configs/multi_v7_defconfig
arch/arm/configs/mvebu_v5_defconfig
arch/arm/configs/pxa_defconfig
arch/arm/configs/shmobile_defconfig
arch/arm/include/asm/Kbuild
arch/arm/include/asm/efi.h
arch/arm/include/asm/uaccess.h
arch/arm/kernel/ptrace.c
arch/arm/lib/getuser.S
arch/arm/mach-ep93xx/edb93xx.c
arch/arm/mach-ep93xx/simone.c
arch/arm/mach-ep93xx/vision_ep9307.c
arch/arm/mach-imx/mmdc.c
arch/arm/mach-shmobile/Kconfig
arch/arm/mm/dma-mapping.c
arch/arm/mm/fault.c
arch/arm/mm/fault.h
arch/arm64/Kconfig
arch/arm64/boot/dts/amlogic/meson-gx.dtsi
arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
arch/arm64/crypto/aes-modes.S
arch/arm64/include/asm/Kbuild
arch/arm64/include/asm/arch_timer.h
arch/arm64/include/asm/efi.h
arch/arm64/kernel/topology.c
arch/arm64/mm/dma-mapping.c
arch/avr32/include/asm/Kbuild
arch/blackfin/include/asm/Kbuild
arch/c6x/include/asm/Kbuild
arch/cris/include/asm/Kbuild
arch/frv/include/asm/Kbuild
arch/frv/include/asm/atomic.h
arch/h8300/include/asm/Kbuild
arch/hexagon/include/asm/Kbuild
arch/ia64/include/asm/cputime.h
arch/ia64/include/asm/thread_info.h
arch/ia64/kernel/head.S
arch/ia64/kernel/setup.c
arch/ia64/kernel/time.c
arch/m32r/include/asm/Kbuild
arch/m68k/68000/m68328.c
arch/m68k/68000/m68EZ328.c
arch/m68k/68000/m68VZ328.c
arch/m68k/atari/atakeyb.c
arch/m68k/atari/config.c
arch/m68k/bvme6000/config.c
arch/m68k/bvme6000/rtc.c
arch/m68k/configs/amiga_defconfig
arch/m68k/configs/apollo_defconfig
arch/m68k/configs/atari_defconfig
arch/m68k/configs/bvme6000_defconfig
arch/m68k/configs/hp300_defconfig
arch/m68k/configs/mac_defconfig
arch/m68k/configs/multi_defconfig
arch/m68k/configs/mvme147_defconfig
arch/m68k/configs/mvme16x_defconfig
arch/m68k/configs/q40_defconfig
arch/m68k/configs/sun3_defconfig
arch/m68k/configs/sun3x_defconfig
arch/m68k/include/asm/Kbuild
arch/m68k/include/asm/bug.h
arch/m68k/include/asm/floppy.h
arch/m68k/include/asm/macints.h
arch/m68k/include/asm/math-emu.h
arch/m68k/include/asm/sun3_pgtable.h
arch/m68k/include/asm/sun3xflop.h
arch/m68k/kernel/dma.c
arch/m68k/kernel/module.c
arch/m68k/kernel/process.c
arch/m68k/kernel/signal.c
arch/m68k/kernel/sys_m68k.c
arch/m68k/kernel/uboot.c
arch/m68k/mac/baboon.c
arch/m68k/mac/macints.c
arch/m68k/mac/misc.c
arch/m68k/mac/oss.c
arch/m68k/mac/psc.c
arch/m68k/mac/via.c
arch/m68k/mm/init.c
arch/m68k/mm/memory.c
arch/m68k/mm/sun3kmap.c
arch/m68k/mm/sun3mmu.c
arch/m68k/mvme147/config.c
arch/m68k/mvme16x/config.c
arch/m68k/mvme16x/rtc.c
arch/m68k/q40/config.c
arch/m68k/q40/q40ints.c
arch/m68k/sun3/config.c
arch/m68k/sun3/dvma.c
arch/m68k/sun3/idprom.c
arch/m68k/sun3/mmu_emu.c
arch/m68k/sun3/prom/printf.c
arch/m68k/sun3/sun3dvma.c
arch/m68k/sun3x/dvma.c
arch/m68k/sun3x/prom.c
arch/metag/include/asm/Kbuild
arch/microblaze/include/asm/Kbuild
arch/mips/Kconfig
arch/mips/configs/bmips_stb_defconfig
arch/mips/configs/lemote2f_defconfig
arch/mips/include/asm/Kbuild
arch/mips/kernel/binfmt_elfn32.c
arch/mips/kernel/binfmt_elfo32.c
arch/mn10300/include/asm/Kbuild
arch/mn10300/include/asm/switch_to.h
arch/nios2/include/asm/Kbuild
arch/openrisc/include/asm/Kbuild
arch/parisc/include/asm/Kbuild
arch/parisc/include/asm/bitops.h
arch/parisc/include/uapi/asm/bitsperlong.h
arch/parisc/include/uapi/asm/swab.h
arch/parisc/kernel/binfmt_elf32.c
arch/parisc/kernel/setup.c
arch/powerpc/Kconfig
arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
arch/powerpc/configs/ppc6xx_defconfig
arch/powerpc/include/asm/accounting.h
arch/powerpc/include/asm/cpu_has_feature.h
arch/powerpc/include/asm/cputime.h
arch/powerpc/include/asm/livepatch.h
arch/powerpc/include/asm/mmu.h
arch/powerpc/include/asm/module.h
arch/powerpc/include/asm/paca.h
arch/powerpc/include/asm/reg.h
arch/powerpc/include/asm/stackprotector.h [deleted file]
arch/powerpc/include/asm/xics.h
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/eeh_driver.c
arch/powerpc/kernel/entry_32.S
arch/powerpc/kernel/module_64.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/time.c
arch/powerpc/mm/fault.c
arch/powerpc/mm/init_64.c
arch/powerpc/mm/pgtable-radix.c
arch/powerpc/mm/tlb-radix.c
arch/powerpc/platforms/powernv/smp.c
arch/powerpc/sysdev/xics/icp-opal.c
arch/powerpc/xmon/xmon.c
arch/s390/appldata/appldata_os.c
arch/s390/include/asm/cputime.h
arch/s390/include/asm/lowcore.h
arch/s390/include/asm/processor.h
arch/s390/kernel/idle.c
arch/s390/kernel/ptrace.c
arch/s390/kernel/vtime.c
arch/s390/mm/pgtable.c
arch/score/include/asm/Kbuild
arch/sh/configs/sh7785lcr_32bit_defconfig
arch/sh/include/asm/Kbuild
arch/sparc/include/asm/Kbuild
arch/sparc/include/asm/mmu_context_64.h
arch/sparc/kernel/irq_64.c
arch/sparc/kernel/sstate.c
arch/sparc/kernel/traps_64.c
arch/tile/include/asm/Kbuild
arch/tile/include/asm/div64.h [new file with mode: 0644]
arch/tile/kernel/ptrace.c
arch/um/drivers/random.c
arch/um/include/asm/Kbuild
arch/unicore32/include/asm/Kbuild
arch/x86/Kconfig
arch/x86/Kconfig.debug
arch/x86/boot/boot.h
arch/x86/boot/compressed/eboot.c
arch/x86/boot/compressed/head_32.S
arch/x86/boot/compressed/head_64.S
arch/x86/boot/compressed/kaslr.c
arch/x86/boot/string.c
arch/x86/crypto/aesni-intel_glue.c
arch/x86/events/Makefile
arch/x86/events/amd/Makefile [new file with mode: 0644]
arch/x86/events/amd/uncore.c
arch/x86/events/intel/cstate.c
arch/x86/events/intel/pt.c
arch/x86/events/intel/rapl.c
arch/x86/events/intel/uncore.c
arch/x86/include/asm/Kbuild
arch/x86/include/asm/apic.h
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/div64.h
arch/x86/include/asm/e820.h
arch/x86/include/asm/efi.h
arch/x86/include/asm/elf.h
arch/x86/include/asm/fpu/internal.h
arch/x86/include/asm/intel-mid.h
arch/x86/include/asm/io.h
arch/x86/include/asm/mce.h
arch/x86/include/asm/microcode.h
arch/x86/include/asm/microcode_amd.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/msr.h
arch/x86/include/asm/pgtable_32.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/spinlock.h
arch/x86/include/asm/uv/uv.h
arch/x86/include/asm/uv/uv_hub.h
arch/x86/include/uapi/asm/bootparam.h
arch/x86/include/uapi/asm/hwcap2.h [new file with mode: 0644]
arch/x86/kernel/Makefile
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/acpi/cstate.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/apm_32.c
arch/x86/kernel/asm-offsets.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/centaur.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/cyrix.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/mcheck/mce-apei.c
arch/x86/kernel/cpu/mcheck/mce-genpool.c
arch/x86/kernel/cpu/mcheck/mce-inject.c
arch/x86/kernel/cpu/mcheck/mce-internal.h
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/mcheck/mce_amd.c
arch/x86/kernel/cpu/mcheck/therm_throt.c
arch/x86/kernel/cpu/microcode/amd.c
arch/x86/kernel/cpu/microcode/core.c
arch/x86/kernel/cpu/microcode/intel.c
arch/x86/kernel/cpu/transmeta.c
arch/x86/kernel/e820.c
arch/x86/kernel/fpu/core.c
arch/x86/kernel/fpu/init.c
arch/x86/kernel/fpu/xstate.c
arch/x86/kernel/head32.c
arch/x86/kernel/head_32.S
arch/x86/kernel/hpet.c
arch/x86/kernel/itmt.c
arch/x86/kernel/jump_label.c
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/kvm.c
arch/x86/kernel/kvmclock.c
arch/x86/kernel/paravirt-spinlocks.c
arch/x86/kernel/pci-calgary_64.c
arch/x86/kernel/setup.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/test_nx.c [deleted file]
arch/x86/kernel/traps.c
arch/x86/kernel/tsc.c
arch/x86/kernel/tsc_sync.c
arch/x86/kernel/vm86_32.c
arch/x86/kvm/hyperv.c
arch/x86/kvm/x86.c
arch/x86/lib/delay.c
arch/x86/mm/dump_pagetables.c
arch/x86/mm/pageattr.c
arch/x86/mm/pat_rbtree.c
arch/x86/platform/efi/efi-bgrt.c
arch/x86/platform/efi/efi.c
arch/x86/platform/efi/efi_64.c
arch/x86/platform/intel-mid/device_libs/Makefile
arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c
arch/x86/platform/intel-mid/device_libs/platform_ipc.c [deleted file]
arch/x86/platform/intel-mid/device_libs/platform_ipc.h [deleted file]
arch/x86/platform/intel-mid/device_libs/platform_mrfld_rtc.c [new file with mode: 0644]
arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
arch/x86/platform/intel-mid/device_libs/platform_msic_audio.c
arch/x86/platform/intel-mid/device_libs/platform_msic_battery.c
arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.c
arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.c
arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.c
arch/x86/platform/intel-mid/device_libs/platform_msic_thermal.c
arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c [deleted file]
arch/x86/platform/intel-mid/mrfld.c
arch/x86/platform/intel-mid/sfi.c
arch/x86/platform/uv/uv_nmi.c
arch/x86/ras/Kconfig
arch/x86/xen/spinlock.c
arch/xtensa/include/asm/Kbuild
arch/xtensa/kernel/setup.c
block/blk-lib.c
block/cfq-iosched.c
crypto/algapi.c
crypto/algif_aead.c
drivers/acpi/Makefile
drivers/acpi/acpi_extlog.c
drivers/acpi/acpica/tbdata.c
drivers/acpi/acpica/tbinstal.c
drivers/acpi/arm64/iort.c
drivers/acpi/bgrt.c
drivers/acpi/gsi.c [deleted file]
drivers/acpi/irq.c [new file with mode: 0644]
drivers/acpi/nfit/core.c
drivers/acpi/nfit/mce.c
drivers/acpi/processor_perflib.c
drivers/acpi/resource.c
drivers/acpi/sleep.c
drivers/acpi/video_detect.c
drivers/ata/libata-core.c
drivers/ata/sata_mv.c
drivers/base/cpu.c
drivers/base/firmware_class.c
drivers/base/memory.c
drivers/base/platform-msi.c
drivers/base/platform.c
drivers/base/power/domain.c
drivers/base/power/qos.c
drivers/base/power/runtime.c
drivers/base/power/wakeirq.c
drivers/base/regmap/regcache-rbtree.c
drivers/base/regmap/regcache.c
drivers/base/regmap/regmap-irq.c
drivers/base/regmap/regmap.c
drivers/bcma/bcma_private.h
drivers/bcma/driver_chipcommon.c
drivers/bcma/driver_mips.c
drivers/block/drbd/drbd_bitmap.c
drivers/block/drbd/drbd_main.c
drivers/block/drbd/drbd_req.c
drivers/block/rbd.c
drivers/block/virtio_blk.c
drivers/block/xen-blkfront.c
drivers/char/hw_random/core.c
drivers/clocksource/Kconfig
drivers/clocksource/Makefile
drivers/clocksource/arm_arch_timer.c
drivers/clocksource/clkevt-probe.c [new file with mode: 0644]
drivers/clocksource/renesas-ostm.c [new file with mode: 0644]
drivers/clocksource/tcb_clksrc.c
drivers/clocksource/timer-gemini.c [new file with mode: 0644]
drivers/cpufreq/Kconfig
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/Makefile
drivers/cpufreq/bmips-cpufreq.c [new file with mode: 0644]
drivers/cpufreq/brcmstb-avs-cpufreq.c
drivers/cpufreq/cpufreq-dt-platdev.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_governor.c
drivers/cpufreq/cpufreq_stats.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/powernv-cpufreq.c
drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
drivers/cpufreq/qoriq-cpufreq.c
drivers/cpufreq/s3c2416-cpufreq.c
drivers/cpufreq/ti-cpufreq.c [new file with mode: 0644]
drivers/cpuidle/governors/menu.c
drivers/crypto/ccp/ccp-dev-v5.c
drivers/crypto/ccp/ccp-dev.h
drivers/crypto/ccp/ccp-dmaengine.c
drivers/crypto/chelsio/chcr_algo.c
drivers/crypto/chelsio/chcr_core.c
drivers/crypto/chelsio/chcr_crypto.h
drivers/crypto/qat/qat_c62x/adf_drv.c
drivers/crypto/qat/qat_common/adf_accel_devices.h
drivers/crypto/qat/qat_common/qat_hal.c
drivers/devfreq/devfreq-event.c
drivers/devfreq/devfreq.c
drivers/devfreq/event/exynos-ppmu.c
drivers/devfreq/exynos-bus.c
drivers/devfreq/governor.h
drivers/devfreq/governor_passive.c
drivers/devfreq/governor_userspace.c
drivers/dma/cppi41.c
drivers/dma/pl330.c
drivers/edac/amd64_edac.c
drivers/edac/amd64_edac.h
drivers/edac/edac_mc.c
drivers/edac/edac_mc.h
drivers/edac/edac_mc_sysfs.c
drivers/edac/fsl_ddr_edac.c
drivers/edac/i7300_edac.c
drivers/edac/i7core_edac.c
drivers/edac/i82975x_edac.c
drivers/edac/mce_amd.c
drivers/edac/mce_amd.h
drivers/edac/mpc85xx_edac.c
drivers/edac/sb_edac.c
drivers/edac/skx_edac.c
drivers/firmware/efi/arm-init.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/esrt.c
drivers/firmware/efi/libstub/Makefile
drivers/firmware/efi/libstub/arm-stub.c
drivers/firmware/efi/libstub/efi-stub-helper.c
drivers/firmware/efi/libstub/efistub.h
drivers/firmware/efi/libstub/fdt.c
drivers/firmware/efi/libstub/secureboot.c [new file with mode: 0644]
drivers/firmware/efi/memattr.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdgpu/dce_virtual.c
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/amd/amdgpu/si_dpm.c
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
drivers/gpu/drm/ast/ast_drv.h
drivers/gpu/drm/ast/ast_main.c
drivers/gpu/drm/ast/ast_post.c
drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
drivers/gpu/drm/cirrus/Kconfig
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_connector.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_gem_cma_helper.c
drivers/gpu/drm/drm_info.c
drivers/gpu/drm/drm_mode_object.c
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/drm_probe_helper.c
drivers/gpu/drm/etnaviv/etnaviv_gem.c
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
drivers/gpu/drm/i915/gvt/aperture_gm.c
drivers/gpu/drm/i915/gvt/cfg_space.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/execlist.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/gtt.h
drivers/gpu/drm/i915/gvt/gvt.c
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/mmio.c
drivers/gpu/drm/i915/gvt/mmio.h
drivers/gpu/drm/i915/gvt/opregion.c
drivers/gpu/drm/i915/gvt/reg.h
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/gvt/scheduler.h
drivers/gpu/drm/i915/gvt/vgpu.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_internal.c
drivers/gpu/drm/i915/i915_gem_object.h
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/intel_atomic_plane.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dpll_mgr.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fbc.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/nouveau/dispnv04/hw.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_fence.c
drivers/gpu/drm/nouveau/nouveau_fence.h
drivers/gpu/drm/nouveau/nouveau_led.h
drivers/gpu/drm/nouveau/nouveau_usif.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nv84_fence.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
drivers/gpu/drm/omapdrm/omap_gem.c
drivers/gpu/drm/radeon/radeon_cursor.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_execbuf_util.c
drivers/gpu/drm/ttm/ttm_object.c
drivers/gpu/drm/vc4/vc4_crtc.c
drivers/gpu/drm/vc4/vc4_gem.c
drivers/gpu/drm/vc4/vc4_plane.c
drivers/gpu/drm/vc4/vc4_render_cl.c
drivers/gpu/drm/virtio/virtgpu_fb.c
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
drivers/hid/Kconfig
drivers/hid/hid-core.c
drivers/hid/hid-cp2112.c
drivers/hid/hid-ids.h
drivers/hid/hid-lg.c
drivers/hid/hid-mf.c
drivers/hid/hid-microsoft.c
drivers/hid/hid-multitouch.c
drivers/hid/hid-rmi.c
drivers/hid/intel-ish-hid/ipc/hw-ish-regs.h
drivers/hid/intel-ish-hid/ipc/hw-ish.h
drivers/hid/intel-ish-hid/ipc/pci-ish.c
drivers/hid/intel-ish-hid/ishtp-hid.c
drivers/hid/intel-ish-hid/ishtp/bus.c
drivers/hid/intel-ish-hid/ishtp/hbm.c
drivers/hid/intel-ish-hid/ishtp/init.c
drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
drivers/hid/usbhid/hid-core.c
drivers/hid/usbhid/hid-quirks.c
drivers/hid/usbhid/usbkbd.c
drivers/hid/usbhid/usbmouse.c
drivers/hid/wacom.h
drivers/hid/wacom_sys.c
drivers/hid/wacom_wac.c
drivers/hid/wacom_wac.h
drivers/hv/ring_buffer.c
drivers/hwmon/Kconfig
drivers/hwmon/Makefile
drivers/hwmon/adc128d818.c
drivers/hwmon/adm1021.c
drivers/hwmon/adm1025.c
drivers/hwmon/adm1026.c
drivers/hwmon/adm1031.c
drivers/hwmon/adm9240.c
drivers/hwmon/adt7411.c
drivers/hwmon/adt7470.c
drivers/hwmon/adt7475.c
drivers/hwmon/adt7x10.c
drivers/hwmon/asb100.c
drivers/hwmon/atxp1.c
drivers/hwmon/dme1737.c
drivers/hwmon/ds1621.c
drivers/hwmon/emc2103.c
drivers/hwmon/f71805f.c
drivers/hwmon/f71882fg.c
drivers/hwmon/fam15h_power.c
drivers/hwmon/fschmd.c
drivers/hwmon/g760a.c
drivers/hwmon/g762.c
drivers/hwmon/gl518sm.c
drivers/hwmon/gl520sm.c
drivers/hwmon/gpio-fan.c
drivers/hwmon/hwmon.c
drivers/hwmon/i5500_temp.c
drivers/hwmon/i5k_amb.c
drivers/hwmon/it87.c
drivers/hwmon/jz4740-hwmon.c
drivers/hwmon/k10temp.c
drivers/hwmon/k8temp.c
drivers/hwmon/lm63.c
drivers/hwmon/lm70.c
drivers/hwmon/lm78.c
drivers/hwmon/lm80.c
drivers/hwmon/lm83.c
drivers/hwmon/lm85.c
drivers/hwmon/lm87.c
drivers/hwmon/lm90.c
drivers/hwmon/lm92.c
drivers/hwmon/lm93.c
drivers/hwmon/lm95234.c
drivers/hwmon/ltc4151.c
drivers/hwmon/max1111.c
drivers/hwmon/max1619.c
drivers/hwmon/max197.c
drivers/hwmon/max6650.c
drivers/hwmon/mc13783-adc.c
drivers/hwmon/mcp3021.c
drivers/hwmon/nct6683.c
drivers/hwmon/nct6775.c
drivers/hwmon/nsa320-hwmon.c
drivers/hwmon/pc87360.c
drivers/hwmon/pc87427.c
drivers/hwmon/pcf8591.c
drivers/hwmon/sch5627.c
drivers/hwmon/sch56xx-common.c
drivers/hwmon/sht15.c
drivers/hwmon/sht21.c
drivers/hwmon/sis5595.c
drivers/hwmon/smsc47m1.c
drivers/hwmon/smsc47m192.c
drivers/hwmon/stts751.c [new file with mode: 0644]
drivers/hwmon/tmp401.c
drivers/hwmon/via-cputemp.c
drivers/hwmon/via686a.c
drivers/hwmon/vt8231.c
drivers/hwmon/w83627ehf.c
drivers/hwmon/w83627hf.c
drivers/hwmon/w83781d.c
drivers/hwmon/w83791d.c
drivers/hwmon/w83792d.c
drivers/hwmon/w83793.c
drivers/i2c/busses/i2c-cadence.c
drivers/i2c/busses/i2c-designware-core.c
drivers/i2c/busses/i2c-designware-core.h
drivers/i2c/busses/i2c-imx-lpi2c.c
drivers/i2c/busses/i2c-piix4.c
drivers/iio/adc/palmas_gpadc.c
drivers/iio/health/afe4403.c
drivers/iio/health/afe4404.c
drivers/iio/health/max30100.c
drivers/iio/humidity/dht11.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/umem.c
drivers/infiniband/hw/cxgb3/iwch_cm.h
drivers/infiniband/hw/cxgb3/iwch_provider.c
drivers/infiniband/hw/cxgb3/iwch_qp.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/cq.c
drivers/infiniband/hw/cxgb4/device.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/cxgb4/provider.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/cxgb4/t4.h
drivers/infiniband/hw/i40iw/i40iw_verbs.c
drivers/infiniband/hw/nes/nes_verbs.c
drivers/infiniband/hw/qedr/main.c
drivers/infiniband/hw/qedr/qedr.h
drivers/infiniband/hw/qedr/qedr_cm.c
drivers/infiniband/hw/qedr/verbs.c
drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
drivers/infiniband/hw/usnic/usnic_ib_verbs.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
drivers/infiniband/sw/rxe/rxe_mr.c
drivers/infiniband/sw/rxe/rxe_net.c
drivers/infiniband/sw/rxe/rxe_qp.c
drivers/infiniband/sw/rxe/rxe_resp.c
drivers/infiniband/ulp/iser/iscsi_iser.c
drivers/infiniband/ulp/iser/iscsi_iser.h
drivers/infiniband/ulp/iser/iser_verbs.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/input/misc/uinput.c
drivers/input/mouse/elan_i2c_core.c
drivers/input/rmi4/Kconfig
drivers/input/rmi4/rmi_driver.c
drivers/input/touchscreen/wm97xx-core.c
drivers/iommu/Kconfig
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/amd_iommu_types.h
drivers/iommu/arm-smmu-v3.c
drivers/iommu/arm-smmu.c
drivers/iommu/dma-iommu.c
drivers/iommu/dmar.c
drivers/iommu/exynos-iommu.c
drivers/iommu/intel-iommu.c
drivers/iommu/io-pgtable-arm-v7s.c
drivers/iommu/io-pgtable-arm.c
drivers/iommu/iommu-sysfs.c
drivers/iommu/iommu.c
drivers/iommu/iova.c
drivers/iommu/ipmmu-vmsa.c
drivers/iommu/msm_iommu.c
drivers/iommu/msm_iommu.h
drivers/iommu/mtk_iommu.c
drivers/iommu/mtk_iommu.h
drivers/iommu/of_iommu.c
drivers/irqchip/Kconfig
drivers/irqchip/Makefile
drivers/irqchip/irq-gemini.c [new file with mode: 0644]
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-keystone.c
drivers/irqchip/irq-mips-gic.c
drivers/irqchip/irq-mxs.c
drivers/irqchip/qcom-irq-combiner.c [new file with mode: 0644]
drivers/isdn/hardware/eicon/message.c
drivers/isdn/mISDN/stack.c
drivers/leds/Kconfig
drivers/leds/led-class.c
drivers/leds/leds-ktd2692.c
drivers/leds/trigger/ledtrig-heartbeat.c
drivers/macintosh/rack-meter.c
drivers/md/dm-bufio.c
drivers/md/dm-crypt.c
drivers/md/dm-mpath.c
drivers/md/dm-rq.c
drivers/md/md.c
drivers/md/persistent-data/dm-block-manager.c
drivers/md/raid5-cache.c
drivers/md/raid5.c
drivers/md/raid5.h
drivers/media/cec/cec-adap.c
drivers/media/dvb-core/dvb_net.c
drivers/media/i2c/Kconfig
drivers/media/i2c/smiapp/smiapp-core.c
drivers/media/i2c/tvp5150.c
drivers/media/i2c/tvp5150_reg.h
drivers/media/pci/cobalt/cobalt-driver.c
drivers/media/pci/cobalt/cobalt-driver.h
drivers/media/usb/dvb-usb/pctv452e.c
drivers/media/usb/siano/smsusb.c
drivers/memstick/core/memstick.c
drivers/mfd/lpc_ich.c
drivers/misc/genwqe/card_dev.c
drivers/misc/lkdtm.h
drivers/misc/lkdtm_bugs.c
drivers/misc/lkdtm_core.c
drivers/misc/mei/debugfs.c
drivers/mmc/core/mmc.c
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/mmci.c
drivers/mmc/host/sdhci.c
drivers/mtd/bcm47xxpart.c
drivers/mtd/devices/bcm47xxsflash.c
drivers/mtd/devices/bcm47xxsflash.h
drivers/mtd/devices/m25p80.c
drivers/mtd/devices/serial_flash_cmds.h
drivers/mtd/devices/st_spi_fsm.c
drivers/mtd/maps/Kconfig
drivers/mtd/maps/Makefile
drivers/mtd/maps/ichxrom.c
drivers/mtd/maps/lantiq-flash.c
drivers/mtd/maps/physmap_of.c
drivers/mtd/maps/physmap_of_gemini.c [new file with mode: 0644]
drivers/mtd/maps/physmap_of_gemini.h [new file with mode: 0644]
drivers/mtd/maps/physmap_of_versatile.c
drivers/mtd/maps/pmcmsp-flash.c
drivers/mtd/mtdchar.c
drivers/mtd/mtdcore.c
drivers/mtd/mtdpart.c
drivers/mtd/nand/Kconfig
drivers/mtd/nand/fsl_ifc_nand.c
drivers/mtd/nand/fsmc_nand.c
drivers/mtd/nand/lpc32xx_slc.c
drivers/mtd/nand/mtk_nand.c
drivers/mtd/nand/nand_base.c
drivers/mtd/nand/nand_ids.c
drivers/mtd/nand/sunxi_nand.c
drivers/mtd/nand/xway_nand.c
drivers/mtd/ofpart.c
drivers/mtd/spi-nor/Kconfig
drivers/mtd/spi-nor/Makefile
drivers/mtd/spi-nor/aspeed-smc.c [new file with mode: 0644]
drivers/mtd/spi-nor/cadence-quadspi.c
drivers/mtd/spi-nor/fsl-quadspi.c
drivers/mtd/spi-nor/intel-spi-platform.c [new file with mode: 0644]
drivers/mtd/spi-nor/intel-spi.c [new file with mode: 0644]
drivers/mtd/spi-nor/intel-spi.h [new file with mode: 0644]
drivers/mtd/spi-nor/spi-nor.c
drivers/net/can/c_can/c_can_pci.c
drivers/net/can/ti_hecc.c
drivers/net/ethernet/adaptec/starfire.c
drivers/net/ethernet/amd/xgbe/xgbe-common.h
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/amd/xgbe/xgbe-pci.c
drivers/net/ethernet/amd/xgbe/xgbe.h
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/broadcom/bcm63xx_enet.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/cadence/macb.h
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
drivers/net/ethernet/cavium/thunder/thunder_xcv.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mellanox/mlx4/catas.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/intf.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/port.c
drivers/net/ethernet/mellanox/mlx5/core/vport.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/qlogic/qed/qed_ll2.c
drivers/net/ethernet/qlogic/qed/qed_ll2.h
drivers/net/ethernet/qlogic/qed/qed_roce.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/xilinx/xilinx_emaclite.c
drivers/net/gtp.c
drivers/net/hamradio/mkiss.c
drivers/net/hyperv/netvsc.c
drivers/net/loopback.c
drivers/net/macvtap.c
drivers/net/phy/bcm63xx.c
drivers/net/phy/dp83848.c
drivers/net/phy/marvell.c
drivers/net/phy/mdio-bcm-iproc.c
drivers/net/phy/micrel.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/phy/phy_led_triggers.c
drivers/net/tun.c
drivers/net/usb/catc.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/pegasus.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/usb/rtl8150.c
drivers/net/usb/sierra_net.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/wireless/intel/iwlwifi/iwl-8000.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/tt.c
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
drivers/net/xen-netback/common.h
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/xenbus.c
drivers/net/xen-netfront.c
drivers/ntb/hw/intel/ntb_hw_intel.c
drivers/ntb/ntb_transport.c
drivers/ntb/test/ntb_perf.c
drivers/nvdimm/namespace_devs.c
drivers/nvdimm/pfn_devs.c
drivers/nvme/host/fc.c
drivers/nvme/target/configfs.c
drivers/nvme/target/core.c
drivers/nvme/target/fc.c
drivers/nvme/target/nvmet.h
drivers/nvme/target/rdma.c
drivers/parport/parport_gsc.c
drivers/pci/hotplug/pciehp_ctrl.c
drivers/pci/hotplug/pnv_php.c
drivers/pci/msi.c
drivers/pci/pci.c
drivers/pci/pcie/aspm.c
drivers/pci/pcie/pme.c
drivers/pci/slot.c
drivers/pinctrl/berlin/berlin-bg4ct.c
drivers/pinctrl/intel/pinctrl-baytrail.c
drivers/pinctrl/intel/pinctrl-broxton.c
drivers/pinctrl/intel/pinctrl-intel.c
drivers/pinctrl/intel/pinctrl-merrifield.c
drivers/pinctrl/meson/pinctrl-meson-gxbb.c
drivers/pinctrl/meson/pinctrl-meson-gxl.c
drivers/pinctrl/pinctrl-amd.c
drivers/pinctrl/sunxi/pinctrl-sunxi.c
drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
drivers/platform/x86/Kconfig
drivers/platform/x86/Makefile
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/intel_mid_powerbtn.c
drivers/platform/x86/intel_pmic_gpio.c [deleted file]
drivers/platform/x86/mlx-platform.c
drivers/platform/x86/surface3-wmi.c
drivers/power/reset/Kconfig
drivers/power/reset/at91-poweroff.c
drivers/power/reset/at91-reset.c
drivers/power/reset/at91-sama5d2_shdwc.c
drivers/power/supply/Kconfig
drivers/power/supply/Makefile
drivers/power/supply/ab8500_btemp.c
drivers/power/supply/axp20x_ac_power.c [new file with mode: 0644]
drivers/power/supply/axp20x_usb_power.c
drivers/power/supply/axp288_charger.c
drivers/power/supply/axp288_fuel_gauge.c
drivers/power/supply/bq2415x_charger.c
drivers/power/supply/bq24190_charger.c
drivers/power/supply/bq24735-charger.c
drivers/power/supply/bq27xxx_battery.c
drivers/power/supply/bq27xxx_battery_i2c.c
drivers/power/supply/gpio-charger.c
drivers/power/supply/intel_mid_battery.c [deleted file]
drivers/power/supply/max14656_charger_detector.c [new file with mode: 0644]
drivers/power/supply/max8997_charger.c
drivers/power/supply/pcf50633-charger.c
drivers/power/supply/qcom_smbb.c
drivers/power/supply/sbs-charger.c [new file with mode: 0644]
drivers/power/supply/tps65217_charger.c
drivers/power/supply/wm97xx_battery.c
drivers/regulator/88pm800.c
drivers/regulator/88pm8607.c
drivers/regulator/Kconfig
drivers/regulator/Makefile
drivers/regulator/aat2870-regulator.c
drivers/regulator/act8945a-regulator.c
drivers/regulator/ad5398.c
drivers/regulator/anatop-regulator.c
drivers/regulator/arizona-ldo1.c
drivers/regulator/arizona-micsupp.c
drivers/regulator/as3711-regulator.c
drivers/regulator/axp20x-regulator.c
drivers/regulator/bcm590xx-regulator.c
drivers/regulator/core.c
drivers/regulator/cpcap-regulator.c [new file with mode: 0644]
drivers/regulator/devres.c
drivers/regulator/fan53555.c
drivers/regulator/fixed.c
drivers/regulator/hi655x-regulator.c
drivers/regulator/internal.h
drivers/regulator/lp8755.c
drivers/regulator/ltc3589.c
drivers/regulator/ltc3676.c
drivers/regulator/max14577-regulator.c
drivers/regulator/max77620-regulator.c
drivers/regulator/max77686-regulator.c
drivers/regulator/max77693-regulator.c
drivers/regulator/max77802-regulator.c
drivers/regulator/max8907-regulator.c
drivers/regulator/max8925-regulator.c
drivers/regulator/max8952.c
drivers/regulator/palmas-regulator.c
drivers/regulator/pbias-regulator.c
drivers/regulator/pcap-regulator.c
drivers/regulator/pcf50633-regulator.c
drivers/regulator/pfuze100-regulator.c
drivers/regulator/pv88060-regulator.c
drivers/regulator/pv88080-regulator.c
drivers/regulator/pv88090-regulator.c
drivers/regulator/qcom_smd-regulator.c
drivers/regulator/rc5t583-regulator.c
drivers/regulator/rn5t618-regulator.c
drivers/regulator/s2mpa01.c
drivers/regulator/tps65086-regulator.c
drivers/regulator/tps65217-regulator.c
drivers/regulator/twl6030-regulator.c
drivers/reset/core.c
drivers/rtc/Kconfig
drivers/rtc/rtc-jz4740.c
drivers/s390/scsi/zfcp_fsf.c
drivers/scsi/aacraid/comminit.c
drivers/scsi/bnx2fc/bnx2fc_io.c
drivers/scsi/cxgbi/libcxgbi.h
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/scsi_lib.c
drivers/scsi/sd.c
drivers/scsi/sg.c
drivers/scsi/virtio_scsi.c
drivers/spi/Kconfig
drivers/spi/Makefile
drivers/spi/spi-armada-3700.c
drivers/spi/spi-ath79.c
drivers/spi/spi-bcm-qspi.c
drivers/spi/spi-bcm53xx.c
drivers/spi/spi-dw.c
drivers/spi/spi-dw.h
drivers/spi/spi-ep93xx.c
drivers/spi/spi-fsl-lpspi.c
drivers/spi/spi-fsl-spi.c
drivers/spi/spi-imx.c
drivers/spi/spi-lantiq-ssc.c [new file with mode: 0644]
drivers/spi/spi-mpc52xx.c
drivers/spi/spi-mt65xx.c
drivers/spi/spi-ppc4xx.c
drivers/spi/spi-pxa2xx-pci.c
drivers/spi/spi-pxa2xx.c
drivers/spi/spi-rockchip.c
drivers/spi/spi-rspi.c
drivers/spi/spi-s3c64xx.c
drivers/spi/spi-sh-msiof.c
drivers/spi/spi-ti-qspi.c
drivers/spi/spi-topcliff-pch.c
drivers/spi/spi.c
drivers/staging/android/ion/ion.c
drivers/staging/comedi/comedi_buf.c
drivers/staging/greybus/timesync_platform.c
drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
drivers/staging/lustre/lustre/llite/llite_mmap.c
drivers/target/target_core_device.c
drivers/target/target_core_pr.c
drivers/target/target_core_sbc.c
drivers/target/target_core_transport.c
drivers/target/target_core_xcopy.c
drivers/target/tcm_fc/tfc_sess.c
drivers/thermal/thermal_hwmon.c
drivers/tty/tty_ldsem.c
drivers/usb/core/quirks.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/mon/mon_main.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_core.h
drivers/usb/serial/option.c
drivers/usb/serial/pl2303.c
drivers/usb/serial/pl2303.h
drivers/usb/serial/qcserial.c
drivers/vfio/vfio_iommu_spapr_tce.c
drivers/vfio/vfio_iommu_type1.c
drivers/vhost/vhost.c
drivers/vhost/vsock.c
drivers/video/fbdev/core/fbcmap.c
drivers/virtio/virtio_mmio.c
drivers/xen/swiotlb-xen.c
fs/Kconfig
fs/binfmt_elf.c
fs/binfmt_elf_fdpic.c
fs/block_dev.c
fs/btrfs/compression.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/cifs/readdir.c
fs/compat_binfmt_elf.c
fs/dax.c
fs/exofs/sys.c
fs/ext2/Kconfig
fs/ext4/Kconfig
fs/fscache/cookie.c
fs/fscache/netfs.c
fs/fscache/object.c
fs/fuse/dev.c
fs/fuse/fuse_i.h
fs/iomap.c
fs/jbd2/commit.c
fs/jbd2/journal.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/pnfs.c
fs/nfsd/nfs4layouts.c
fs/nfsd/nfs4state.c
fs/nfsd/state.h
fs/ocfs2/cluster/netdebug.c
fs/ocfs2/cluster/tcp.c
fs/ocfs2/dlm/dlmdebug.c
fs/ocfs2/dlm/dlmdomain.c
fs/ocfs2/dlm/dlmmaster.c
fs/ocfs2/dlm/dlmunlock.c
fs/proc/array.c
fs/proc/base.c
fs/proc/page.c
fs/proc/stat.c
fs/proc/uptime.c
fs/pstore/ram.c
fs/romfs/super.c
fs/splice.c
fs/timerfd.c
fs/userfaultfd.c
fs/xfs/libxfs/xfs_ag_resv.c
fs/xfs/libxfs/xfs_attr.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_bmap.h
fs/xfs/libxfs/xfs_ialloc_btree.c
fs/xfs/libxfs/xfs_ialloc_btree.h
fs/xfs/libxfs/xfs_sb.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_mount.h
fs/xfs/xfs_qm.c
include/asm-generic/cputime.h [deleted file]
include/asm-generic/cputime_jiffies.h [deleted file]
include/asm-generic/cputime_nsecs.h [deleted file]
include/asm-generic/export.h
include/asm-generic/rwsem.h
include/drm/drmP.h
include/drm/drm_atomic.h
include/drm/drm_connector.h
include/drm/drm_framebuffer.h
include/drm/drm_mode_config.h
include/drm/ttm/ttm_bo_api.h
include/drm/ttm/ttm_bo_driver.h
include/dt-bindings/thermal/lm90.h [new file with mode: 0644]
include/linux/acpi.h
include/linux/bcma/bcma_driver_chipcommon.h
include/linux/bpf-cgroup.h
include/linux/bpf.h
include/linux/buffer_head.h
include/linux/can/core.h
include/linux/clockchips.h
include/linux/clocksource.h
include/linux/compat.h
include/linux/cpufreq.h
include/linux/cpuhotplug.h
include/linux/cpumask.h
include/linux/cputime.h
include/linux/delay.h
include/linux/delayacct.h
include/linux/devfreq.h
include/linux/dma-iommu.h
include/linux/dma-mapping.h
include/linux/edac.h
include/linux/efi-bgrt.h
include/linux/efi.h
include/linux/export.h
include/linux/fscache-cache.h
include/linux/fsl_ifc.h
include/linux/gpio/driver.h
include/linux/hrtimer.h
include/linux/hyperv.h
include/linux/init_task.h
include/linux/intel-iommu.h
include/linux/intel_pmic_gpio.h [deleted file]
include/linux/iommu.h
include/linux/irq.h
include/linux/irqchip/arm-gic-v3.h
include/linux/irqdomain.h
include/linux/jiffies.h
include/linux/jump_label.h
include/linux/kernel_stat.h
include/linux/kprobes.h
include/linux/kref.h
include/linux/leds.h
include/linux/llist.h
include/linux/log2.h
include/linux/math64.h
include/linux/memory_hotplug.h
include/linux/mfd/axp20x.h
include/linux/mfd/lpc_ich.h
include/linux/micrel_phy.h
include/linux/mmzone.h
include/linux/module.h
include/linux/msi.h
include/linux/mtd/fsmc.h [deleted file]
include/linux/mtd/mtd.h
include/linux/mtd/nand.h
include/linux/mtd/partitions.h
include/linux/mtd/spi-nor.h
include/linux/mutex.h
include/linux/netdevice.h
include/linux/nfs4.h
include/linux/nmi.h
include/linux/of_iommu.h
include/linux/percpu-refcount.h
include/linux/percpu-rwsem.h
include/linux/perf_event.h
include/linux/phy.h
include/linux/phy_led_triggers.h
include/linux/platform_data/intel-spi.h [new file with mode: 0644]
include/linux/platform_data/spi-ep93xx.h
include/linux/pm_domain.h
include/linux/pm_qos.h
include/linux/poison.h
include/linux/posix-timers.h
include/linux/power/bq27xxx_battery.h
include/linux/pxa2xx_ssp.h
include/linux/rcupdate.h
include/linux/rcutiny.h
include/linux/rcuwait.h [new file with mode: 0644]
include/linux/refcount.h [new file with mode: 0644]
include/linux/regmap.h
include/linux/sched.h
include/linux/sched/sysctl.h
include/linux/spinlock.h
include/linux/spinlock_api_smp.h
include/linux/spinlock_api_up.h
include/linux/srcu.h
include/linux/sunrpc/cache.h
include/linux/sunrpc/clnt.h
include/linux/suspend.h
include/linux/timer.h
include/linux/virtio_net.h
include/linux/vtime.h
include/linux/ww_mutex.h
include/net/bluetooth/hci_core.h
include/net/cipso_ipv4.h
include/net/ipv6.h
include/net/lwtunnel.h
include/net/netfilter/nf_tables.h
include/net/netfilter/nft_fib.h
include/net/sock.h
include/rdma/ib_verbs.h
include/soc/arc/mcip.h
include/soc/at91/at91sam9_ddrsdr.h
include/target/target_core_base.h
include/trace/events/rcu.h
include/trace/events/timer.h
include/uapi/linux/bpf.h
include/uapi/linux/cec-funcs.h
include/uapi/linux/ethtool.h
include/uapi/linux/l2tp.h
include/uapi/linux/netfilter/nf_log.h
include/uapi/linux/netfilter/nf_tables.h
include/uapi/linux/seg6.h
include/uapi/linux/videodev2.h
include/uapi/rdma/Kbuild
include/uapi/rdma/cxgb3-abi.h
include/uapi/rdma/ib_user_verbs.h
init/Kconfig
init/main.c
init/version.c
kernel/acct.c
kernel/bpf/arraymap.c
kernel/bpf/cgroup.c
kernel/bpf/hashtab.c
kernel/bpf/stackmap.c
kernel/bpf/syscall.c
kernel/cgroup.c
kernel/delayacct.c
kernel/events/core.c
kernel/exit.c
kernel/extable.c
kernel/fork.c
kernel/futex.c
kernel/irq/devres.c
kernel/irq/irqdomain.c
kernel/irq/msi.c
kernel/irq/proc.c
kernel/irq/spurious.c
kernel/kprobes.c
kernel/kthread.c
kernel/locking/Makefile
kernel/locking/lockdep.c
kernel/locking/locktorture.c
kernel/locking/mutex-debug.h
kernel/locking/mutex.c
kernel/locking/mutex.h
kernel/locking/percpu-rwsem.c
kernel/locking/qspinlock_paravirt.h
kernel/locking/rtmutex.c
kernel/locking/rwsem-spinlock.c
kernel/locking/rwsem-xadd.c
kernel/locking/semaphore.c
kernel/locking/spinlock.c
kernel/locking/spinlock_debug.c
kernel/locking/test-ww_mutex.c [new file with mode: 0644]
kernel/membarrier.c
kernel/module.c
kernel/panic.c
kernel/pid.c
kernel/power/suspend.c
kernel/power/suspend_test.c
kernel/power/swap.c
kernel/printk/printk.c
kernel/rcu/rcutorture.c
kernel/rcu/srcu.c
kernel/rcu/tiny.c
kernel/rcu/tree.c
kernel/rcu/tree.h
kernel/rcu/tree_exp.h
kernel/rcu/tree_plugin.h
kernel/rcu/tree_trace.c
kernel/rcu/update.c
kernel/sched/Makefile
kernel/sched/autogroup.c [moved from kernel/sched/auto_group.c with 100% similarity]
kernel/sched/autogroup.h [moved from kernel/sched/auto_group.h with 100% similarity]
kernel/sched/clock.c
kernel/sched/completion.c
kernel/sched/core.c
kernel/sched/cpuacct.c
kernel/sched/cputime.c
kernel/sched/deadline.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/idle_task.c
kernel/sched/rt.c
kernel/sched/sched.h
kernel/sched/stats.h
kernel/sched/stop_task.c
kernel/sched/topology.c [new file with mode: 0644]
kernel/signal.c
kernel/stacktrace.c
kernel/sys.c
kernel/sysctl.c
kernel/time/Makefile
kernel/time/clocksource.c
kernel/time/hrtimer.c
kernel/time/itimer.c
kernel/time/jiffies.c
kernel/time/posix-cpu-timers.c
kernel/time/tick-broadcast.c
kernel/time/tick-sched.c
kernel/time/tick-sched.h
kernel/time/time.c
kernel/time/timeconst.bc
kernel/time/timekeeping.c
kernel/time/timekeeping.h
kernel/time/timekeeping_debug.c
kernel/time/timer.c
kernel/time/timer_list.c
kernel/time/timer_stats.c [deleted file]
kernel/trace/trace_hwlat.c
kernel/trace/trace_kprobe.c
kernel/tsacct.c
kernel/ucount.c
kernel/watchdog.c
kernel/watchdog_hld.c
kernel/workqueue.c
lib/Kconfig.debug
lib/debugobjects.c
lib/ioremap.c
lib/radix-tree.c
lib/timerqueue.c
mm/filemap.c
mm/huge_memory.c
mm/kasan/report.c
mm/memcontrol.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/page_alloc.c
mm/shmem.c
mm/slub.c
mm/zswap.c
net/batman-adv/fragmentation.c
net/bluetooth/6lowpan.c
net/bluetooth/a2mp.c
net/bluetooth/amp.c
net/bluetooth/l2cap_core.c
net/bridge/br_netlink.c
net/can/af_can.c
net/can/af_can.h
net/can/bcm.c
net/can/gw.c
net/can/raw.c
net/ceph/messenger.c
net/ceph/osd_client.c
net/core/datagram.c
net/core/dev.c
net/core/ethtool.c
net/core/lwt_bpf.c
net/core/lwtunnel.c
net/core/neighbour.c
net/dccp/input.c
net/dccp/ipv6.c
net/dsa/dsa2.c
net/dsa/slave.c
net/ethernet/eth.c
net/ipv4/arp.c
net/ipv4/cipso_ipv4.c
net/ipv4/fib_frontend.c
net/ipv4/igmp.c
net/ipv4/ip_output.c
net/ipv4/ip_sockglue.c
net/ipv4/ip_tunnel_core.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/netfilter/ipt_rpfilter.c
net/ipv4/netfilter/nf_reject_ipv4.c
net/ipv4/netfilter/nft_fib_ipv4.c
net/ipv4/ping.c
net/ipv4/tcp.c
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv4/tcp_probe.c
net/ipv4/udp.c
net/ipv6/addrconf.c
net/ipv6/datagram.c
net/ipv6/exthdrs.c
net/ipv6/ila/ila_lwt.c
net/ipv6/inet6_connection_sock.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/mcast.c
net/ipv6/netfilter/ip6t_rpfilter.c
net/ipv6/netfilter/nf_reject_ipv6.c
net/ipv6/netfilter/nft_fib_ipv6.c
net/ipv6/route.c
net/ipv6/seg6.c
net/ipv6/seg6_hmac.c
net/ipv6/seg6_iptunnel.c
net/ipv6/sit.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/irda/irqueue.c
net/kcm/kcmsock.c
net/l2tp/l2tp_core.h
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ip6.c
net/llc/llc_conn.c
net/llc/llc_sap.c
net/mac80211/fils_aead.c
net/mac80211/mesh.c
net/mac80211/rate.c
net/mpls/af_mpls.c
net/mpls/mpls_iptunnel.c
net/netfilter/Kconfig
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_log.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_dynset.c
net/netfilter/nft_log.c
net/netfilter/nft_lookup.c
net/netfilter/nft_objref.c
net/netfilter/nft_set_hash.c
net/netfilter/nft_set_rbtree.c
net/packet/af_packet.c
net/sched/cls_flower.c
net/sched/cls_matchall.c
net/sctp/ipv6.c
net/sctp/offload.c
net/sctp/socket.c
net/sunrpc/auth_gss/gss_rpc_xdr.c
net/sunrpc/cache.c
net/sunrpc/clnt.c
net/sunrpc/sunrpc_syms.c
net/sunrpc/svc_xprt.c
net/sunrpc/svcauth.c
net/sunrpc/xprtrdma/svc_rdma_transport.c
net/tipc/node.c
net/tipc/server.c
net/tipc/subscr.c
net/tipc/subscr.h
net/unix/af_unix.c
net/wireless/nl80211.c
samples/bpf/bpf_load.c
samples/bpf/tc_l2_redirect_kern.c
samples/bpf/test_cgrp2_attach.c
samples/bpf/test_cgrp2_attach2.c
samples/bpf/test_cgrp2_sock.c
samples/bpf/test_cgrp2_sock2.c
samples/bpf/tracex5_kern.c
samples/bpf/xdp_tx_iptunnel_kern.c
scripts/Makefile.build
scripts/analyze_suspend.py
scripts/genksyms/genksyms.c
scripts/kallsyms.c
scripts/mod/modpost.c
security/apparmor/include/apparmor.h
security/apparmor/include/policy.h
security/selinux/hooks.c
sound/core/seq/seq_memory.c
sound/core/seq/seq_queue.c
sound/pci/hda/patch_hdmi.c
sound/usb/line6/driver.c
tools/arch/arm/include/uapi/asm/kvm.h
tools/arch/powerpc/include/uapi/asm/kvm.h
tools/arch/x86/include/asm/cpufeatures.h
tools/arch/x86/include/uapi/asm/vmx.h
tools/build/Makefile.build
tools/include/linux/compiler-gcc.h [new file with mode: 0644]
tools/include/linux/compiler.h
tools/include/uapi/linux/bpf.h
tools/leds/Makefile
tools/leds/led_hw_brightness_mon.c [new file with mode: 0644]
tools/lib/api/Makefile
tools/lib/api/fs/fs.c
tools/lib/api/fs/fs.h
tools/lib/api/fs/tracing_path.c
tools/lib/bpf/bpf.c
tools/lib/bpf/bpf.h
tools/lib/bpf/libbpf.c
tools/lib/bpf/libbpf.h
tools/lib/subcmd/Makefile
tools/lib/subcmd/parse-options.c
tools/lib/subcmd/parse-options.h
tools/lib/traceevent/Makefile
tools/lib/traceevent/kbuffer-parse.c
tools/lib/traceevent/plugin_function.c
tools/objtool/arch/x86/decode.c
tools/perf/Build
tools/perf/Documentation/perf-c2c.txt
tools/perf/Documentation/perf-config.txt
tools/perf/Documentation/perf-diff.txt
tools/perf/Documentation/perf-ftrace.txt [new file with mode: 0644]
tools/perf/Documentation/perf-kallsyms.txt [new file with mode: 0644]
tools/perf/Documentation/perf-record.txt
tools/perf/Documentation/perf-sched.txt
tools/perf/Documentation/perf-script.txt
tools/perf/Documentation/perf-trace.txt
tools/perf/MANIFEST
tools/perf/Makefile.config
tools/perf/Makefile.perf
tools/perf/arch/arm64/Makefile
tools/perf/arch/arm64/include/dwarf-regs-table.h
tools/perf/arch/arm64/util/dwarf-regs.c
tools/perf/bench/futex-hash.c
tools/perf/bench/futex-lock-pi.c
tools/perf/bench/futex-requeue.c
tools/perf/bench/futex-wake-parallel.c
tools/perf/bench/futex-wake.c
tools/perf/bench/futex.h
tools/perf/bench/numa.c
tools/perf/builtin-c2c.c
tools/perf/builtin-diff.c
tools/perf/builtin-ftrace.c [new file with mode: 0644]
tools/perf/builtin-help.c
tools/perf/builtin-kallsyms.c [new file with mode: 0644]
tools/perf/builtin-kmem.c
tools/perf/builtin-list.c
tools/perf/builtin-probe.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-sched.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/builtin-top.c
tools/perf/builtin-trace.c
tools/perf/builtin.h
tools/perf/command-list.txt
tools/perf/perf.c
tools/perf/pmu-events/arch/x86/broadwellde/uncore-cache.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/broadwellde/uncore-memory.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/broadwellde/uncore-power.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/broadwellx/uncore-cache.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/broadwellx/uncore-interconnect.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/broadwellx/uncore-memory.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/broadwellx/uncore-power.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/haswellx/uncore-cache.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/haswellx/uncore-interconnect.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/haswellx/uncore-memory.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/haswellx/uncore-power.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/ivytown/uncore-cache.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/ivytown/uncore-interconnect.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/ivytown/uncore-memory.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/jaketown/uncore-cache.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/jaketown/uncore-interconnect.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/jaketown/uncore-memory.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/knightslanding/uncore-memory.json [new file with mode: 0644]
tools/perf/pmu-events/jevents.c
tools/perf/pmu-events/jevents.h
tools/perf/pmu-events/pmu-events.h
tools/perf/tests/Build
tools/perf/tests/bpf.c
tools/perf/tests/builtin-test.c
tools/perf/tests/llvm.c
tools/perf/tests/parse-events.c
tools/perf/tests/parse-no-sample-id-all.c
tools/perf/tests/perf-record.c
tools/perf/tests/tests.h
tools/perf/tests/unit_number__scnprintf.c [new file with mode: 0644]
tools/perf/ui/browsers/hists.c
tools/perf/ui/hist.c
tools/perf/ui/setup.c
tools/perf/util/Build
tools/perf/util/bpf-loader.c
tools/perf/util/callchain.c
tools/perf/util/callchain.h
tools/perf/util/config.c
tools/perf/util/data-convert-bt.c
tools/perf/util/dso.c
tools/perf/util/event.c
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/evsel_fprintf.c
tools/perf/util/header.c
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/intel-pt-decoder/Build
tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
tools/perf/util/intel-pt.c
tools/perf/util/llvm-utils.c
tools/perf/util/machine.c
tools/perf/util/machine.h
tools/perf/util/map.c
tools/perf/util/parse-events.c
tools/perf/util/parse-events.y
tools/perf/util/pmu.c
tools/perf/util/pmu.h
tools/perf/util/probe-event.c
tools/perf/util/scripting-engines/Build
tools/perf/util/scripting-engines/trace-event-perl.c
tools/perf/util/session.c
tools/perf/util/strfilter.c
tools/perf/util/string.c
tools/perf/util/symbol.c
tools/perf/util/symbol_fprintf.c
tools/perf/util/thread_map.c
tools/perf/util/trace-event-info.c
tools/perf/util/trace-event-parse.c
tools/perf/util/trace-event-read.c
tools/perf/util/trace-event.h
tools/perf/util/unwind-libunwind-local.c
tools/perf/util/util.c
tools/perf/util/util.h
tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py [new file with mode: 0755]
tools/scripts/Makefile.include
tools/testing/selftests/bpf/test_lru_map.c
tools/testing/selftests/locking/ww_mutex.sh [new file with mode: 0644]
tools/testing/selftests/rcutorture/configs/lock/CFLIST
tools/testing/selftests/rcutorture/configs/lock/LOCK07 [new file with mode: 0644]
tools/testing/selftests/rcutorture/configs/lock/LOCK07.boot [new file with mode: 0644]
tools/testing/selftests/rcutorture/configs/rcu/CFcommon
tools/testing/selftests/rcutorture/configs/rcu/TINY01
tools/testing/selftests/rcutorture/configs/rcu/TINY02
tools/testing/selftests/rcutorture/configs/rcu/TREE01
tools/testing/selftests/rcutorture/configs/rcu/TREE02
tools/testing/selftests/rcutorture/configs/rcu/TREE03
tools/testing/selftests/rcutorture/configs/rcu/TREE04
tools/testing/selftests/rcutorture/configs/rcu/TREE05
tools/testing/selftests/rcutorture/configs/rcu/TREE06
tools/testing/selftests/rcutorture/configs/rcu/TREE07
tools/testing/selftests/rcutorture/configs/rcu/TREE08
tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt
tools/testing/selftests/rcutorture/formal/srcu-cbmc/.gitignore [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/Makefile [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/delay.h [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/export.h [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/mutex.h [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/percpu.h [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/preempt.h [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/rcupdate.h [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/sched.h [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/smp.h [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/workqueue.h [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/uapi/linux/types.h [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/.gitignore [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/kconfig.h [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/modify_srcu.awk [new file with mode: 0755]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/assume.h [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/bug_on.h [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/combined_source.c [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/config.h [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/include_srcu.c [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/int_typedefs.h [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/misc.c [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/misc.h [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/percpu.h [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/preempt.c [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/preempt.h [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/simple_sync_srcu.c [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/.gitignore [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/Makefile [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/assert_end.fail [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/force.fail [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/force2.fail [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/force3.fail [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/main.pass [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/test.c [new file with mode: 0644]
tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/test_script.sh [new file with mode: 0755]
tools/testing/selftests/x86/Makefile
tools/testing/selftests/x86/sysret_rip.c [new file with mode: 0644]

diff --git a/CREDITS b/CREDITS
index c58560701d13158f535046c09fd4b825922ced94..c5626bf06264e41e136fbe0c0420881e72de0ce2 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -2478,12 +2478,11 @@ S: D-90453 Nuernberg
 S: Germany
 
 N: Arnaldo Carvalho de Melo
-E: acme@ghostprotocols.net
+E: acme@kernel.org
 E: arnaldo.melo@gmail.com
 E: acme@redhat.com
-W: http://oops.ghostprotocols.net:81/blog/
 P: 1024D/9224DF01 D5DF E3BB E3C8 BCBB F8AD  841A B6AB 4681 9224 DF01
-D: IPX, LLC, DCCP, cyc2x, wl3501_cs, net/ hacks
+D: tools/, IPX, LLC, DCCP, cyc2x, wl3501_cs, net/ hacks
 S: Brazil
 
 N: Karsten Merker
diff --git a/Documentation/ABI/testing/sysfs-class-devfreq-event b/Documentation/ABI/testing/sysfs-class-devfreq-event
new file mode 100644 (file)
index 0000000..ceaf0f6
--- /dev/null
@@ -0,0 +1,25 @@
+What:          /sys/class/devfreq-event/event(x)/
+Date:          January 2017
+Contact:       Chanwoo Choi <cw00.choi@samsung.com>
+Description:
+               Provide a place in sysfs for the devfreq-event objects.
+               This allows accessing various devfreq-event specific variables.
+               The name of devfreq-event object denoted as 'event(x)' which
+               includes the unique number of 'x' for each devfreq-event object.
+
+What:          /sys/class/devfreq-event/event(x)/name
+Date:          January 2017
+Contact:       Chanwoo Choi <cw00.choi@samsung.com>
+Description:
+               The /sys/class/devfreq-event/event(x)/name attribute contains
+               the name of the devfreq-event object. This attribute is
+               read-only.
+
+What:          /sys/class/devfreq-event/event(x)/enable_count
+Date:          January 2017
+Contact:       Chanwoo Choi <cw00.choi@samsung.com>
+Description:
+               The /sys/class/devfreq-event/event(x)/enable_count attribute
+               contains the reference count to enable the devfreq-event
+               object. If the device is enabled, the value of attribute is
+               greater than zero.
index 491cdeedc195041a7f611f10490cc4ca569aeb9a..5f67f7ab277bc51af0bdb8ccb94ae03f6936ad6c 100644 (file)
@@ -23,6 +23,23 @@ Description:
                If the LED does not support different brightness levels, this
                should be 1.
 
+What:          /sys/class/leds/<led>/brightness_hw_changed
+Date:          January 2017
+KernelVersion: 4.11
+Description:
+               Last hardware set brightness level for this LED. Some LEDs
+               may be changed autonomously by hardware/firmware. Only LEDs
+               where this happens and the driver can detect this, will have
+               this file.
+
+               This file supports poll() to detect when the hardware changes
+               the brightness.
+
+               Reading this file will return the last brightness level set
+               by the hardware, this may be different from the current
+               brightness. Reading this file when no hw brightness change
+               event has happened will return an ENODATA error.
+
 What:          /sys/class/leds/<led>/trigger
 Date:          March 2006
 KernelVersion: 2.6.17
index 6568e0010e1a9f80ff0457b7e5178340429b3158..46ff929fd52a317808e1270d7c4a72de7a99970c 100644 (file)
@@ -138,3 +138,20 @@ Contact:   Mauro Carvalho Chehab <m.chehab@samsung.com>
 Description:   This attribute file will display what type of memory is
                currently on this csrow. Normally, either buffered or
                unbuffered memory (for example, Unbuffered-DDR3).
+
+What:          /sys/devices/system/edac/mc/mc*/(dimm|rank)*/dimm_ce_count
+Date:          October 2016
+Contact:       linux-edac@vger.kernel.org
+Description:   This attribute file displays the total count of correctable
+               errors that have occurred on this DIMM. This count is very important
+               to examine. CEs provide early indications that a DIMM is beginning
+               to fail. This count field should be monitored for non-zero
+               values and such information reported to the system administrator.
+
+What:          /sys/devices/system/edac/mc/mc*/(dimm|rank)*/dimm_ue_count
+Date:          October 2016
+Contact:       linux-edac@vger.kernel.org
+Description:   This attribute file displays the total count of uncorrectable
+               errors that have occurred on this DIMM. If panic_on_ue is set, this
+               counter will not have a chance to increment, since EDAC will panic the
+               system.
index 9b31556cfdda8e6171b82cdfc05c51a251ff2428..35c64e00b35c055c4382762bbb782cbc70b5e72f 100644 (file)
@@ -12,3 +12,15 @@ Description: /sys/kernel/iommu_groups/ contains a number of sub-
                file if the IOMMU driver has chosen to register a more
                common name for the group.
 Users:
+
+What:          /sys/kernel/iommu_groups/reserved_regions
+Date:          January 2017
+KernelVersion:  v4.11
+Contact:       Eric Auger <eric.auger@redhat.com>
+Description:    /sys/kernel/iommu_groups/reserved_regions lists IOVA
+               regions that are reserved. Not necessarily all
+               reserved regions are listed. This is typically used to
+               output direct-mapped, MSI, non-mappable regions. Each
+               region is described on a single line: the 1st field is
+               the base IOVA, the second is the end IOVA and the third
+               field describes the type of the region.
index 98bf7ac29aad8fff65e88bc6b1e6c2a6fc3a5033..44c6bc496eee6b140a779afa7b159c0a261326b9 100644 (file)
@@ -143,3 +143,13 @@ So, this provides a way for drivers to avoid those error messages on calls
 where allocation failures are not a problem, and shouldn't bother the logs.
 
 NOTE: At the moment DMA_ATTR_NO_WARN is only implemented on PowerPC.
+
+DMA_ATTR_PRIVILEGED
+------------------------------
+
+Some advanced peripherals such as remote processors and GPUs perform
+accesses to DMA buffers in both privileged "supervisor" and unprivileged
+"user" modes.  This attribute is used to indicate to the DMA-mapping
+subsystem that the buffer is fully accessible at the elevated privilege
+level (and ideally inaccessible or at least read-only at the
+lesser-privileged levels).
index 7eb47ac25ad772bdc25b812e016982d974752f8c..d583c653a703f0645c10c11cd8c1dfe761095cd6 100644 (file)
@@ -4,7 +4,7 @@
         <head><title>A Tour Through TREE_RCU's Data Structures [LWN.net]</title>
         <meta HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
 
-           <p>January 27, 2016</p>
+           <p>December 18, 2016</p>
            <p>This article was contributed by Paul E.&nbsp;McKenney</p>
 
 <h3>Introduction</h3>
@@ -31,9 +31,6 @@ to each other.
        Accessor Functions</a>
 </ol>
 
-At the end we have the
-<a href="#Answers to Quick Quizzes">answers to the quick quizzes</a>.
-
 <h3><a name="Data-Structure Relationships">Data-Structure Relationships</a></h3>
 
 <p>RCU is for all intents and purposes a large state machine, and its
diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/ExpRCUFlow.svg b/Documentation/RCU/Design/Expedited-Grace-Periods/ExpRCUFlow.svg
new file mode 100644 (file)
index 0000000..7c6c90b
--- /dev/null
@@ -0,0 +1,830 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:39:46 2015 -->
+
+<!-- Magnification: 3.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="952.6817"
+   height="1219.6219"
+   viewBox="-66 -66 12729.905 16296.808"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="ExpRCUFlow.svg">
+  <metadata
+     id="metadata94">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title />
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs92">
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend"
+       style="overflow:visible">
+      <path
+         id="path4146"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow1Mend"
+       style="overflow:visible">
+      <path
+         id="path3852"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+         transform="matrix(-0.4,0,0,-0.4,-4,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow1Mend-9"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path3852-7"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+         transform="matrix(-0.4,0,0,-0.4,-4,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-7"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4146-6"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-1"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4146-4"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-16"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4146-8"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-160"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4146-5"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-78"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4146-66"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-8"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4146-56"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-19"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4146-89"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-85"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4146-3"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-73"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4146-55"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-5"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4146-88"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-198"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4146-2"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-4"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4146-22"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker5072"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path5074"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-87"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4146-63"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-6"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4146-26"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-0"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4146-51"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="1090"
+     inkscape:window-height="1148"
+     id="namedview90"
+     showgrid="true"
+     inkscape:zoom="0.80021373"
+     inkscape:cx="462.49289"
+     inkscape:cy="623.19585"
+     inkscape:window-x="557"
+     inkscape:window-y="24"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4"
+     inkscape:snap-grids="false"
+     fit-margin-top="5"
+     fit-margin-right="5"
+     fit-margin-bottom="5"
+     fit-margin-left="5" />
+  <g
+     style="fill:none;stroke-width:0.025in"
+     id="g4"
+     transform="translate(23.312813,523.41305)">
+    <!-- Line: box -->
+    <!-- Line: box -->
+    <!-- Line: box -->
+    <!-- Line: box -->
+    <!-- Line: box -->
+    <!-- Line: box -->
+    <!-- Line: box -->
+    <!-- Line -->
+    <!-- Arrowhead on XXXpoint 11475 2250 - 11475 3465-->
+    <!-- Line: box -->
+    <!-- Line: box -->
+    <!-- Line: box -->
+    <!-- Line: box -->
+    <!-- Line: box -->
+    <!-- Line -->
+    <!-- Arrowhead on XXXpoint 11475 5625 - 11475 6840-->
+    <!-- Line -->
+    <!-- Arrowhead on XXXpoint 7875 225 - 10665 225-->
+    <!-- Line -->
+    <!-- Arrowhead on XXXpoint 9675 675 - 7785 675-->
+    <!-- Line -->
+    <!-- Arrowhead on XXXpoint 9675 4725 - 10665 4725-->
+    <!-- Line -->
+    <!-- Arrowhead on XXXpoint 9225 5175 - 10665 5175-->
+    <!-- Line -->
+    <!-- Arrowhead on XXXpoint 8775 11475 - 10665 11475-->
+    <!-- Line: box -->
+    <!-- Line -->
+    <!-- Arrowhead on XXXpoint 11475 9000 - 11475 10215-->
+    <!-- Text -->
+    <!-- Text -->
+    <!-- Text -->
+    <!-- Text -->
+    <!-- Text -->
+    <!-- Text -->
+    <!-- Text -->
+    <!-- Text -->
+    <!-- Text -->
+    <!-- Text -->
+    <!-- Text -->
+    <!-- Text -->
+    <!-- Text -->
+    <g
+       id="g4104"
+       transform="translate(-1068.9745,0)">
+      <rect
+         transform="matrix(-0.70710678,0.70710678,-0.70710678,-0.70710678,0,0)"
+         y="-7383.8755"
+         x="-6124.8989"
+         height="1966.2251"
+         width="1953.6969"
+         id="rect3032-1-0"
+         style="fill:#96ff96;fill-opacity:1;stroke:#000000;stroke-width:45.00382233;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <text
+         sodipodi:linespacing="125%"
+         id="text4098"
+         y="818.40338"
+         x="8168.2671"
+         style="font-size:267.24359131px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         xml:space="preserve"><tspan
+           y="818.40338"
+           x="8168.2671"
+           id="tspan4100"
+           sodipodi:role="line">Idle or</tspan><tspan
+           id="tspan4102"
+           y="1152.4579"
+           x="8168.2671"
+           sodipodi:role="line">offline?</tspan></text>
+    </g>
+    <g
+       id="g4114"
+       transform="translate(0,147.96969)">
+      <rect
+         id="rect6"
+         style="fill:#87cfff;stroke:#000000;stroke-width:45.00382233;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none"
+         rx="0"
+         height="1475.6636"
+         width="4401.7612"
+         y="0"
+         x="0" />
+      <text
+         sodipodi:linespacing="125%"
+         id="text4110"
+         y="835.11212"
+         x="2206.4917"
+         style="font-size:267.24359131px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         xml:space="preserve"><tspan
+           y="835.11212"
+           x="2206.4917"
+           id="tspan4112"
+           sodipodi:role="line">CPU N Start</tspan></text>
+    </g>
+    <path
+       style="fill:none;stroke:#000000;stroke-width:40.08654022;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       d="M 4432.5052,897.4924 5684.8749,880.79414"
+       id="path4119"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <path
+       style="fill:none;stroke:#000000;stroke-width:40.08654022;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       d="M 8503.0006,874.12161 9755.3703,857.42334"
+       id="path4119-8"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <text
+       xml:space="preserve"
+       style="font-size:267.24359131px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       x="8617.0977"
+       y="705.50983"
+       id="text4593"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan4595"
+         x="8617.0977"
+         y="705.50983">Y</tspan></text>
+    <g
+       style="fill:none;stroke-width:0.025in"
+       id="g4114-9"
+       transform="translate(9722.4732,131.27105)">
+      <rect
+         id="rect6-0"
+         style="fill:#87cfff;stroke:#000000;stroke-width:45.00382233;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none"
+         rx="0"
+         height="1425.5687"
+         width="2748.6331"
+         y="0"
+         x="80.17308" />
+      <text
+         sodipodi:linespacing="125%"
+         id="text4110-5"
+         y="835.11212"
+         x="1460.1007"
+         style="font-size:267.24359131px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         xml:space="preserve"><tspan
+           y="835.11212"
+           x="1460.1007"
+           id="tspan4112-9"
+           sodipodi:role="line">Done</tspan></text>
+    </g>
+    <g
+       style="fill:none;stroke-width:0.025in"
+       id="g4114-5"
+       transform="translate(0,3705.3456)">
+      <rect
+         id="rect6-1"
+         style="fill:#87cfff;stroke:#000000;stroke-width:45.00382233;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none"
+         rx="0"
+         height="1475.6636"
+         width="4401.7612"
+         y="0"
+         x="0" />
+      <text
+         sodipodi:linespacing="125%"
+         id="text4110-9"
+         y="835.11212"
+         x="2206.4917"
+         style="font-size:267.24359131px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         xml:space="preserve"><tspan
+           y="835.11212"
+           x="2206.4917"
+           sodipodi:role="line"
+           id="tspan4776">Send IPI to CPU N</tspan></text>
+    </g>
+    <path
+       style="fill:none;stroke:#000000;stroke-width:40.08654022;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       d="M 7102.5627,2263.5171 4430.8404,3682.8694"
+       id="path4119-3"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <g
+       style="fill:none;stroke-width:0.025in"
+       id="g4104-1"
+       transform="translate(-1065.3349,6403.5782)">
+      <rect
+         transform="matrix(-0.70710678,0.70710678,-0.70710678,-0.70710678,0,0)"
+         y="-7383.8755"
+         x="-6124.8989"
+         height="1966.2251"
+         width="1953.6969"
+         id="rect3032-1-0-6"
+         style="fill:#96ff96;fill-opacity:1;stroke:#000000;stroke-width:45.00382233;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <text
+         sodipodi:linespacing="125%"
+         id="text4098-3"
+         y="482.00006"
+         x="8168.2671"
+         style="font-size:267.24359131px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         xml:space="preserve"><tspan
+           id="tspan4102-8"
+           y="482.00006"
+           x="8168.2671"
+           sodipodi:role="line">In RCU</tspan><tspan
+           y="816.05457"
+           x="8168.2671"
+           sodipodi:role="line"
+           id="tspan4833">read-side</tspan><tspan
+           y="1150.109"
+           x="8168.2671"
+           sodipodi:role="line"
+           id="tspan4835">critical</tspan><tspan
+           y="1484.1636"
+           x="8168.2671"
+           sodipodi:role="line"
+           id="tspan4837">section?</tspan></text>
+    </g>
+    <text
+       xml:space="preserve"
+       style="font-size:267.24362183px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       x="6463.0864"
+       y="2285.6765"
+       id="text4593-0"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan4595-6"
+         x="6463.0864"
+         y="2285.6765">N</tspan></text>
+    <path
+       style="fill:none;stroke:#000000;stroke-width:40.08654022;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:40.08654108, 80.17308215;stroke-dashoffset:0;marker-end:url(#Arrow2Lend)"
+       d="m 2189.1897,5219.361 16.6983,1252.3697"
+       id="path4119-0"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <g
+       style="fill:none;stroke-width:0.025in"
+       id="g4114-5-2"
+       transform="translate(0,6551.5479)">
+      <rect
+         id="rect6-1-7"
+         style="fill:#87cfff;stroke:#000000;stroke-width:45.00382233;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none"
+         rx="0"
+         height="1475.6636"
+         width="4401.7612"
+         y="0"
+         x="0" />
+      <text
+         sodipodi:linespacing="125%"
+         id="text4110-9-5"
+         y="835.11212"
+         x="2206.4917"
+         style="font-size:267.24359131px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         xml:space="preserve"><tspan
+           y="835.11212"
+           x="2206.4917"
+           sodipodi:role="line"
+           id="tspan4776-5">IPI Handler</tspan></text>
+    </g>
+    <path
+       style="fill:none;stroke:#000000;stroke-width:40.08654022;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       d="m 4432.5052,7297.9678 1252.3697,-16.6982"
+       id="path4119-2"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <path
+       style="fill:none;stroke:#000000;stroke-width:40.08654022;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       d="m 8503.0013,7278.6595 1252.369,-16.6982"
+       id="path4119-8-7"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <text
+       xml:space="preserve"
+       style="font-size:267.24362183px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       x="8617.0977"
+       y="7110.0186"
+       id="text4593-4"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan4595-0"
+         x="8617.0977"
+         y="7110.0186">N</tspan></text>
+    <g
+       style="fill:none;stroke-width:0.025in"
+       id="g4114-9-3"
+       transform="translate(9722.4732,6535.809)">
+      <rect
+         id="rect6-0-7"
+         style="fill:#87cfff;stroke:#000000;stroke-width:45.00382233;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none"
+         rx="0"
+         height="1425.5687"
+         width="2748.6331"
+         y="29.467337"
+         x="80.17308" />
+      <text
+         sodipodi:linespacing="125%"
+         id="text4110-5-7"
+         y="503.71591"
+         x="1460.1007"
+         style="font-size:267.24359131px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         xml:space="preserve"><tspan
+           y="503.71591"
+           x="1460.1007"
+           id="tspan4112-9-0"
+           sodipodi:role="line">Report CPU</tspan><tspan
+           y="837.77039"
+           x="1460.1007"
+           sodipodi:role="line"
+           id="tspan4923">Quiescent</tspan><tspan
+           y="1171.825"
+           x="1460.1007"
+           sodipodi:role="line"
+           id="tspan4925">State</tspan></text>
+    </g>
+    <path
+       style="fill:none;stroke:#000000;stroke-width:40.08654022;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:40.08654335, 80.17308669;stroke-dashoffset:0;marker-end:url(#Arrow2Lend)"
+       d="m 7102.5627,8725.7454 16.6983,1252.3697"
+       id="path4119-0-0"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <text
+       xml:space="preserve"
+       style="font-size:267.24362183px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       x="6797.0522"
+       y="9018.6807"
+       id="text4593-3"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan4595-2"
+         x="6797.0522"
+         y="9018.6807">Y</tspan></text>
+    <g
+       style="fill:none;stroke-width:0.025in"
+       id="g4114-9-3-8"
+       transform="translate(-80.17308,11381.108)">
+      <rect
+         id="rect6-0-7-5"
+         style="fill:#87cfff;stroke:#000000;stroke-width:45.00382233;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none"
+         rx="0"
+         height="1425.5687"
+         width="2748.6331"
+         y="29.467337"
+         x="80.17308" />
+      <text
+         sodipodi:linespacing="125%"
+         id="text4110-5-7-6"
+         y="841.88086"
+         x="1460.1007"
+         style="font-size:267.24359131px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         xml:space="preserve"><tspan
+           y="841.88086"
+           x="1460.1007"
+           sodipodi:role="line"
+           id="tspan4925-1">rcu_read_unlock()</tspan></text>
+    </g>
+    <path
+       style="fill:none;stroke:#000000;stroke-width:40.08654022;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:40.08654562, 80.17309124;stroke-dashoffset:0;marker-end:url(#Arrow2Lend)"
+       d="m 1362.6256,10071.26 16.6983,1252.369"
+       id="path4119-0-0-7"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <path
+       style="fill:none;stroke:#000000;stroke-width:40.08654022;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker-end:url(#Arrow2Lend)"
+       d="m 1362.6256,12883.919 16.6983,1252.369"
+       id="path4119-0-0-7-7"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <g
+       style="fill:none;stroke-width:0.025in"
+       id="g4114-9-3-8-1"
+       transform="translate(9722.4732,11389.458)">
+      <rect
+         id="rect6-0-7-5-1"
+         style="fill:#87cfff;stroke:#000000;stroke-width:45.00382233;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none"
+         rx="0"
+         height="1425.5687"
+         width="2748.6331"
+         y="29.467337"
+         x="80.17308" />
+      <text
+         sodipodi:linespacing="125%"
+         id="text4110-5-7-6-2"
+         y="841.88086"
+         x="1460.1007"
+         style="font-size:267.24359131px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         xml:space="preserve"><tspan
+           y="841.88086"
+           x="1460.1007"
+           sodipodi:role="line"
+           id="tspan4925-1-2">Context Switch</tspan></text>
+    </g>
+    <path
+       style="fill:none;stroke:#000000;stroke-width:40.08654022;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:40.08654789, 80.17309578;stroke-dashoffset:0;marker-end:url(#Arrow2Lend)"
+       d="m 11165.272,10071.26 16.698,1252.369"
+       id="path4119-0-0-7-8"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <g
+       style="fill:none;stroke-width:0.025in"
+       id="g4114-9-3-9"
+       transform="translate(-80.17308,14163.046)">
+      <rect
+         id="rect6-0-7-1"
+         style="fill:#87cfff;stroke:#000000;stroke-width:45.00382233;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none"
+         rx="0"
+         height="1425.5687"
+         width="2748.6331"
+         y="29.467337"
+         x="80.17308" />
+      <text
+         sodipodi:linespacing="125%"
+         id="text4110-5-7-3"
+         y="503.71591"
+         x="1460.1007"
+         style="font-size:267.24359131px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         xml:space="preserve"><tspan
+           y="503.71591"
+           x="1460.1007"
+           id="tspan4112-9-0-4"
+           sodipodi:role="line">Report CPU</tspan><tspan
+           y="837.77039"
+           x="1460.1007"
+           sodipodi:role="line"
+           id="tspan4923-3">and Task</tspan><tspan
+           y="1171.825"
+           x="1460.1007"
+           sodipodi:role="line"
+           id="tspan4925-9">Quiescent States</tspan></text>
+    </g>
+    <g
+       style="fill:none;stroke-width:0.025in"
+       id="g4114-9-3-8-1-8"
+       transform="translate(5663.2978,11389.458)">
+      <rect
+         id="rect6-0-7-5-1-1"
+         style="fill:#87cfff;stroke:#000000;stroke-width:45.00382233;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none"
+         rx="0"
+         height="1425.5687"
+         width="2748.6331"
+         y="29.467337"
+         x="80.17308" />
+      <text
+         sodipodi:linespacing="125%"
+         id="text4110-5-7-6-2-4"
+         y="841.88086"
+         x="1460.1007"
+         style="font-size:267.24359131px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         xml:space="preserve"><tspan
+           y="841.88086"
+           x="1460.1007"
+           sodipodi:role="line"
+           id="tspan4925-1-2-4">Enqueue Task</tspan></text>
+    </g>
+    <path
+       style="fill:none;stroke:#000000;stroke-width:40.08654022;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       d="M 9827.612,12141.988 8575.243,12125.29"
+       id="path4119-8-7-5"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <path
+       style="fill:none;stroke:#000000;stroke-width:40.08654022;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker-end:url(#Arrow2Lend)"
+       d="m 7106.0965,12818.962 16.6983,1252.369"
+       id="path4119-0-0-7-7-5"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <g
+       style="fill:none;stroke-width:0.025in"
+       id="g4114-9-3-9-2"
+       transform="translate(5663.2978,14098.088)">
+      <rect
+         id="rect6-0-7-1-8"
+         style="fill:#87cfff;stroke:#000000;stroke-width:45.00382233;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none"
+         rx="0"
+         height="1425.5687"
+         width="2748.6331"
+         y="29.467337"
+         x="80.17308" />
+      <text
+         sodipodi:linespacing="125%"
+         id="text4110-5-7-3-4"
+         y="503.71591"
+         x="1460.1007"
+         style="font-size:267.24359131px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         xml:space="preserve"><tspan
+           y="503.71591"
+           x="1460.1007"
+           sodipodi:role="line"
+           id="tspan4923-3-2">Report CPU</tspan><tspan
+           y="837.77039"
+           x="1460.1007"
+           sodipodi:role="line"
+           id="tspan4925-9-9">Quiescent</tspan><tspan
+           y="1171.825"
+           x="1460.1007"
+           sodipodi:role="line"
+           id="tspan5239">State</tspan></text>
+    </g>
+    <path
+       style="fill:none;stroke:#000000;stroke-width:40.08654022;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:40.08654562, 80.17309124;stroke-dashoffset:0;marker-end:url(#Arrow2Lend)"
+       d="M 5733.305,14095.542 2761.014,12809.774"
+       id="path4119-0-0-2"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <path
+       style="fill:none;stroke:#000000;stroke-width:40.08654022;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:40.08654107, 80.17308214;stroke-dashoffset:0"
+       d="m 1353.3524,10079.499 9701.6916,0 100.189,-16.698"
+       id="path5265"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="ccc" />
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/ExpSchedFlow.svg b/Documentation/RCU/Design/Expedited-Grace-Periods/ExpSchedFlow.svg
new file mode 100644 (file)
index 0000000..e4233ac
--- /dev/null
@@ -0,0 +1,826 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:39:46 2015 -->
+
+<!-- Magnification: 3.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="952.6817"
+   height="1425.6191"
+   viewBox="-66 -66 12729.905 19049.38"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="ExpSchedFlow.svg">
+  <metadata
+     id="metadata94">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title />
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs92">
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend"
+       style="overflow:visible">
+      <path
+         id="path4146"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow1Mend"
+       style="overflow:visible">
+      <path
+         id="path3852"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+         transform="matrix(-0.4,0,0,-0.4,-4,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow1Mend-9"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path3852-7"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+         transform="matrix(-0.4,0,0,-0.4,-4,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-7"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4146-6"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-1"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4146-4"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-16"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4146-8"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-160"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4146-5"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-78"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4146-66"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-8"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4146-56"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-19"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4146-89"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-85"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4146-3"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-73"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4146-55"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-5"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4146-88"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-198"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4146-2"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-4"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4146-22"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker5072"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path5074"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-87"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4146-63"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-6"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4146-26"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-0"
+       style="overflow:visible">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4146-51"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-58"
+       style="overflow:visible">
+      <path
+         id="path4146-61"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="1090"
+     inkscape:window-height="1148"
+     id="namedview90"
+     showgrid="true"
+     inkscape:zoom="0.80021373"
+     inkscape:cx="462.49289"
+     inkscape:cy="473.6718"
+     inkscape:window-x="770"
+     inkscape:window-y="24"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4114-9-3-9"
+     inkscape:snap-grids="false"
+     fit-margin-top="5"
+     fit-margin-right="5"
+     fit-margin-bottom="5"
+     fit-margin-left="5" />
+  <g
+     style="fill:none;stroke-width:0.025in"
+     id="g4"
+     transform="translate(23.312814,523.41265)">
+    <!-- Line: box -->
+    <!-- Line: box -->
+    <!-- Line: box -->
+    <!-- Line: box -->
+    <!-- Line: box -->
+    <!-- Line: box -->
+    <!-- Line: box -->
+    <!-- Line -->
+    <!-- Arrowhead on XXXpoint 11475 2250 - 11475 3465-->
+    <!-- Line: box -->
+    <!-- Line: box -->
+    <!-- Line: box -->
+    <!-- Line: box -->
+    <!-- Line: box -->
+    <!-- Line -->
+    <!-- Arrowhead on XXXpoint 11475 5625 - 11475 6840-->
+    <!-- Line -->
+    <!-- Arrowhead on XXXpoint 7875 225 - 10665 225-->
+    <!-- Line -->
+    <!-- Arrowhead on XXXpoint 9675 675 - 7785 675-->
+    <!-- Line -->
+    <!-- Arrowhead on XXXpoint 9675 4725 - 10665 4725-->
+    <!-- Line -->
+    <!-- Arrowhead on XXXpoint 9225 5175 - 10665 5175-->
+    <!-- Line -->
+    <!-- Arrowhead on XXXpoint 8775 11475 - 10665 11475-->
+    <!-- Line: box -->
+    <!-- Line -->
+    <!-- Arrowhead on XXXpoint 11475 9000 - 11475 10215-->
+    <!-- Text -->
+    <!-- Text -->
+    <!-- Text -->
+    <!-- Text -->
+    <!-- Text -->
+    <!-- Text -->
+    <!-- Text -->
+    <!-- Text -->
+    <!-- Text -->
+    <!-- Text -->
+    <!-- Text -->
+    <!-- Text -->
+    <!-- Text -->
+    <g
+       id="g4104"
+       transform="translate(-1068.9745,0)">
+      <rect
+         transform="matrix(-0.70710678,0.70710678,-0.70710678,-0.70710678,0,0)"
+         y="-7383.8755"
+         x="-6124.8989"
+         height="1966.2251"
+         width="1953.6969"
+         id="rect3032-1-0"
+         style="fill:#96ff96;fill-opacity:1;stroke:#000000;stroke-width:45.00382233;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <text
+         sodipodi:linespacing="125%"
+         id="text4098"
+         y="818.40338"
+         x="8168.2671"
+         style="font-size:267.24359131px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         xml:space="preserve"><tspan
+           y="818.40338"
+           x="8168.2671"
+           id="tspan4100"
+           sodipodi:role="line">Idle or</tspan><tspan
+           id="tspan4102"
+           y="1152.4579"
+           x="8168.2671"
+           sodipodi:role="line">offline?</tspan></text>
+    </g>
+    <g
+       id="g4114"
+       transform="translate(0,147.96969)">
+      <rect
+         id="rect6"
+         style="fill:#87cfff;stroke:#000000;stroke-width:45.00382233;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none"
+         rx="0"
+         height="1475.6636"
+         width="4401.7612"
+         y="0"
+         x="0" />
+      <text
+         sodipodi:linespacing="125%"
+         id="text4110"
+         y="835.11212"
+         x="2206.4917"
+         style="font-size:267.24359131px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         xml:space="preserve"><tspan
+           y="835.11212"
+           x="2206.4917"
+           id="tspan4112"
+           sodipodi:role="line">CPU N Start</tspan></text>
+    </g>
+    <path
+       style="fill:none;stroke:#000000;stroke-width:40.08654022;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       d="M 4432.5052,897.4924 5684.8749,880.79414"
+       id="path4119"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <path
+       style="fill:none;stroke:#000000;stroke-width:40.08654022;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       d="M 8503.0006,874.12161 9755.3703,857.42334"
+       id="path4119-8"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <text
+       xml:space="preserve"
+       style="font-size:267.24359131px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       x="8617.0977"
+       y="705.50983"
+       id="text4593"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan4595"
+         x="8617.0977"
+         y="705.50983">Y</tspan></text>
+    <g
+       style="fill:none;stroke-width:0.025in"
+       id="g4114-9"
+       transform="translate(9722.4732,131.27105)">
+      <rect
+         id="rect6-0"
+         style="fill:#87cfff;stroke:#000000;stroke-width:45.00382233;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none"
+         rx="0"
+         height="1425.5687"
+         width="2748.6331"
+         y="0"
+         x="80.17308" />
+      <text
+         sodipodi:linespacing="125%"
+         id="text4110-5"
+         y="835.11212"
+         x="1460.1007"
+         style="font-size:267.24359131px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         xml:space="preserve"><tspan
+           y="835.11212"
+           x="1460.1007"
+           id="tspan4112-9"
+           sodipodi:role="line">Done</tspan></text>
+    </g>
+    <g
+       style="fill:none;stroke-width:0.025in"
+       id="g4114-5"
+       transform="translate(0,3705.3456)">
+      <rect
+         id="rect6-1"
+         style="fill:#87cfff;stroke:#000000;stroke-width:45.00382233;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none"
+         rx="0"
+         height="1475.6636"
+         width="4401.7612"
+         y="0"
+         x="0" />
+      <text
+         sodipodi:linespacing="125%"
+         id="text4110-9"
+         y="835.11212"
+         x="2206.4917"
+         style="font-size:267.24359131px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         xml:space="preserve"><tspan
+           y="835.11212"
+           x="2206.4917"
+           sodipodi:role="line"
+           id="tspan4776">Send IPI to CPU N</tspan></text>
+    </g>
+    <path
+       style="fill:none;stroke:#000000;stroke-width:40.08654022;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       d="M 7102.5627,2263.5171 4430.8404,3682.8694"
+       id="path4119-3"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <g
+       style="fill:none;stroke-width:0.025in"
+       id="g4104-1"
+       transform="translate(-1065.3349,6403.5782)">
+      <rect
+         transform="matrix(-0.70710678,0.70710678,-0.70710678,-0.70710678,0,0)"
+         y="-7383.8755"
+         x="-6124.8989"
+         height="1966.2251"
+         width="1953.6969"
+         id="rect3032-1-0-6"
+         style="fill:#96ff96;fill-opacity:1;stroke:#000000;stroke-width:45.00382233;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <text
+         sodipodi:linespacing="125%"
+         id="text4098-3"
+         y="985.4306"
+         x="8168.2671"
+         style="font-size:267.24359131px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         xml:space="preserve"><tspan
+           y="985.4306"
+           x="8168.2671"
+           sodipodi:role="line"
+           id="tspan3109">CPU idle?</tspan></text>
+    </g>
+    <text
+       xml:space="preserve"
+       style="font-size:267.24362183px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       x="6463.0864"
+       y="2285.6765"
+       id="text4593-0"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan4595-6"
+         x="6463.0864"
+         y="2285.6765">N</tspan></text>
+    <path
+       style="fill:none;stroke:#000000;stroke-width:40.08654022;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:40.08654108, 80.17308215;stroke-dashoffset:0;marker-end:url(#Arrow2Lend)"
+       d="m 2189.1897,5219.361 16.6983,1252.3697"
+       id="path4119-0"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <g
+       style="fill:none;stroke-width:0.025in"
+       id="g4114-5-2"
+       transform="translate(0,6551.5479)">
+      <rect
+         id="rect6-1-7"
+         style="fill:#87cfff;stroke:#000000;stroke-width:45.00382233;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none"
+         rx="0"
+         height="1475.6636"
+         width="4401.7612"
+         y="0"
+         x="0" />
+      <text
+         sodipodi:linespacing="125%"
+         id="text4110-9-5"
+         y="835.11212"
+         x="2206.4917"
+         style="font-size:267.24359131px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         xml:space="preserve"><tspan
+           y="835.11212"
+           x="2206.4917"
+           sodipodi:role="line"
+           id="tspan4776-5">IPI Handler</tspan></text>
+    </g>
+    <path
+       style="fill:none;stroke:#000000;stroke-width:40.08654022;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       d="m 4432.5052,7297.9678 1252.3697,-16.6982"
+       id="path4119-2"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <path
+       style="fill:none;stroke:#000000;stroke-width:40.08654022;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       d="m 8503.0013,7278.6595 1252.369,-16.6982"
+       id="path4119-8-7"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <text
+       xml:space="preserve"
+       style="font-size:267.24362183px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       x="8617.0977"
+       y="7110.0186"
+       id="text4593-4"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan4595-0"
+         x="8617.0977"
+         y="7110.0186">Y</tspan></text>
+    <g
+       style="fill:none;stroke-width:0.025in"
+       id="g4114-9-3"
+       transform="translate(9722.4732,6535.809)">
+      <rect
+         id="rect6-0-7"
+         style="fill:#87cfff;stroke:#000000;stroke-width:45.00382233;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none"
+         rx="0"
+         height="1425.5687"
+         width="2748.6331"
+         y="29.467337"
+         x="80.17308" />
+      <text
+         sodipodi:linespacing="125%"
+         id="text4110-5-7"
+         y="503.71591"
+         x="1460.1007"
+         style="font-size:267.24359131px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         xml:space="preserve"><tspan
+           y="503.71591"
+           x="1460.1007"
+           id="tspan4112-9-0"
+           sodipodi:role="line">Report CPU</tspan><tspan
+           y="837.77039"
+           x="1460.1007"
+           sodipodi:role="line"
+           id="tspan4923">Quiescent</tspan><tspan
+           y="1171.825"
+           x="1460.1007"
+           sodipodi:role="line"
+           id="tspan4925">State</tspan></text>
+    </g>
+    <path
+       style="fill:none;stroke:#000000;stroke-width:40.08654022;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:40.08654335, 80.17308669;stroke-dashoffset:0;marker-end:url(#Arrow2Lend)"
+       d="m 7102.5627,11478.337 16.6983,1252.35"
+       id="path4119-0-0"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <text
+       xml:space="preserve"
+       style="font-size:267.24362183px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       x="6797.0522"
+       y="9018.6807"
+       id="text4593-3"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan4595-2"
+         x="6797.0522"
+         y="9018.6807">N</tspan></text>
+    <g
+       style="fill:none;stroke-width:0.025in"
+       id="g4114-9-3-8"
+       transform="translate(-80.17308,14133.68)">
+      <rect
+         id="rect6-0-7-5"
+         style="fill:#87cfff;stroke:#000000;stroke-width:45.00382233;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none"
+         rx="0"
+         height="1425.5687"
+         width="2748.6331"
+         y="29.467337"
+         x="80.17308" />
+      <text
+         sodipodi:linespacing="125%"
+         id="text4110-5-7-6"
+         y="841.88086"
+         x="1460.1007"
+         style="font-size:267.24359131px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         xml:space="preserve"><tspan
+           y="841.88086"
+           x="1460.1007"
+           sodipodi:role="line"
+           id="tspan4925-1">Context Switch</tspan></text>
+    </g>
+    <path
+       style="fill:none;stroke:#000000;stroke-width:40.08654022;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:40.08654562, 80.17309124;stroke-dashoffset:0;marker-end:url(#Arrow2Lend)"
+       d="m 1362.6256,12823.832 16.6983,1252.369"
+       id="path4119-0-0-7"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <path
+       style="fill:none;stroke:#000000;stroke-width:40.08654022;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker-end:url(#Arrow2Lend)"
+       d="m 1362.6256,15636.491 16.6983,1252.369"
+       id="path4119-0-0-7-7"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <g
+       style="fill:none;stroke-width:0.025in"
+       id="g4114-9-3-8-1"
+       transform="translate(9722.4732,14142.03)">
+      <rect
+         id="rect6-0-7-5-1"
+         style="fill:#87cfff;stroke:#000000;stroke-width:45.00382233;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none"
+         rx="0"
+         height="1425.5687"
+         width="2748.6331"
+         y="29.467337"
+         x="80.17308" />
+      <text
+         sodipodi:linespacing="125%"
+         id="text4110-5-7-6-2"
+         y="841.88086"
+         x="1460.1007"
+         style="font-size:267.24359131px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         xml:space="preserve"><tspan
+           y="841.88086"
+           x="1460.1007"
+           sodipodi:role="line"
+           id="tspan4925-1-2">CPU Offline</tspan></text>
+    </g>
+    <path
+       style="fill:none;stroke:#000000;stroke-width:40.08654022;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:40.08654789, 80.17309578;stroke-dashoffset:0;marker-end:url(#Arrow2Lend)"
+       d="m 11165.272,12823.832 16.698,1252.369"
+       id="path4119-0-0-7-8"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <g
+       style="fill:none;stroke-width:0.025in"
+       id="g4114-9-3-9"
+       transform="translate(-80.17308,16915.618)">
+      <rect
+         id="rect6-0-7-1"
+         style="fill:#87cfff;stroke:#000000;stroke-width:45.00382233;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none"
+         rx="0"
+         height="1425.5687"
+         width="2748.6331"
+         y="29.467337"
+         x="80.17308" />
+      <text
+         sodipodi:linespacing="125%"
+         id="text4110-5-7-3"
+         y="505.47754"
+         x="1460.1007"
+         style="font-size:267.24359131px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         xml:space="preserve"><tspan
+           y="505.47754"
+           x="1460.1007"
+           id="tspan4112-9-0-4"
+           sodipodi:role="line">Report CPU</tspan><tspan
+           y="839.53204"
+           x="1460.1007"
+           sodipodi:role="line"
+           id="tspan4925-9">Quiescent</tspan><tspan
+           y="1173.5865"
+           x="1460.1007"
+           sodipodi:role="line"
+           id="tspan3168">State</tspan></text>
+    </g>
+    <path
+       style="fill:none;stroke:#000000;stroke-width:40.08654022;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker-end:url(#Arrow2Lend)"
+       d="m 11165.272,15571.534 16.698,1252.369"
+       id="path4119-0-0-7-7-5"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <g
+       style="fill:none;stroke-width:0.025in"
+       id="g4114-9-3-9-2"
+       transform="translate(9722.4732,16850.66)">
+      <rect
+         id="rect6-0-7-1-8"
+         style="fill:#87cfff;stroke:#000000;stroke-width:45.00382233;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none"
+         rx="0"
+         height="1425.5687"
+         width="2748.6331"
+         y="29.467337"
+         x="80.17308" />
+      <text
+         sodipodi:linespacing="125%"
+         id="text4110-5-7-3-4"
+         y="503.71591"
+         x="1460.1007"
+         style="font-size:267.24359131px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         xml:space="preserve"><tspan
+           y="503.71591"
+           x="1460.1007"
+           sodipodi:role="line"
+           id="tspan4923-3-2">Report CPU</tspan><tspan
+           y="837.77039"
+           x="1460.1007"
+           sodipodi:role="line"
+           id="tspan4925-9-9">Quiescent</tspan><tspan
+           y="1171.825"
+           x="1460.1007"
+           sodipodi:role="line"
+           id="tspan5239">State</tspan></text>
+    </g>
+    <path
+       style="fill:none;stroke:#000000;stroke-width:40.08654022;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:40.08654107, 80.17308214;stroke-dashoffset:0"
+       d="m 1353.3524,12832.071 9701.6916,0 100.189,-16.698"
+       id="path5265"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="ccc" />
+    <path
+       style="fill:none;stroke:#000000;stroke-width:40.08654022;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker-end:url(#Arrow2Lend)"
+       d="m 7112.6465,8669.1867 16.6983,1252.369"
+       id="path4119-0-0-7-7-5-7"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <g
+       style="fill:none;stroke-width:0.025in"
+       id="g4114-9-3-8-1-8-3"
+       transform="translate(5663.1399,9972.3627)">
+      <rect
+         id="rect6-0-7-5-1-1-9"
+         style="fill:#87cfff;stroke:#000000;stroke-width:45.00382233;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none"
+         rx="0"
+         height="1425.5687"
+         width="2748.6331"
+         y="29.467337"
+         x="80.17308" />
+      <text
+         sodipodi:linespacing="125%"
+         id="text4110-5-7-6-2-4-0"
+         y="841.88086"
+         x="1460.1007"
+         style="font-size:267.24359131px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         xml:space="preserve"><tspan
+           y="841.88086"
+           x="1460.1007"
+           sodipodi:role="line"
+           id="tspan4925-1-2-4-5">resched_cpu()</tspan></text>
+    </g>
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
new file mode 100644 (file)
index 0000000..7a3194c
--- /dev/null
@@ -0,0 +1,626 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
+        "http://www.w3.org/TR/html4/loose.dtd">
+        <html>
+        <head><title>A Tour Through TREE_RCU's Expedited Grace Periods</title>
+        <meta HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+
+<h2>Introduction</h2>
+
+This document describes RCU's expedited grace periods.
+Unlike RCU's normal grace periods, which accept long latencies to attain
+high efficiency and minimal disturbance, expedited grace periods accept
+lower efficiency and significant disturbance to attain shorter latencies.
+
+<p>
+There are three flavors of RCU (RCU-bh, RCU-preempt, and RCU-sched),
+but only two flavors of expedited grace periods because the RCU-bh
+expedited grace period maps onto the RCU-sched expedited grace period.
+Each of the remaining two implementations is covered in its own section.
+
+<ol>
+<li>   <a href="#Expedited Grace Period Design">
+       Expedited Grace Period Design</a>
+<li>   <a href="#RCU-preempt Expedited Grace Periods">
+       RCU-preempt Expedited Grace Periods</a>
+<li>   <a href="#RCU-sched Expedited Grace Periods">
+       RCU-sched Expedited Grace Periods</a>
+<li>   <a href="#Expedited Grace Period and CPU Hotplug">
+       Expedited Grace Period and CPU Hotplug</a>
+<li>   <a href="#Expedited Grace Period Refinements">
+       Expedited Grace Period Refinements</a>
+</ol>
+
+<h2><a name="Expedited Grace Period Design">
+Expedited Grace Period Design</a></h2>
+
+<p>
+The expedited RCU grace periods cannot be accused of being subtle,
+given that they for all intents and purposes hammer every CPU that
+has not yet provided a quiescent state for the current expedited
+grace period.
+The one saving grace is that the hammer has grown a bit smaller
+over time:  The old call to <tt>try_stop_cpus()</tt> has been
+replaced with a set of calls to <tt>smp_call_function_single()</tt>,
+each of which results in an IPI to the target CPU.
+The corresponding handler function checks the CPU's state, motivating
+a faster quiescent state where possible, and triggering a report
+of that quiescent state.
+As always for RCU, once everything has spent some time in a quiescent
+state, the expedited grace period has completed.
+
+<p>
+The details of the <tt>smp_call_function_single()</tt> handler's
+operation depend on the RCU flavor, as described in the following
+sections.
+
+<h2><a name="RCU-preempt Expedited Grace Periods">
+RCU-preempt Expedited Grace Periods</a></h2>
+
+<p>
+The overall flow of the handling of a given CPU by an RCU-preempt
+expedited grace period is shown in the following diagram:
+
+<p><img src="ExpRCUFlow.svg" alt="ExpRCUFlow.svg" width="55%">
+
+<p>
+The solid arrows denote direct action, for example, a function call.
+The dotted arrows denote indirect action, for example, an IPI
+or a state that is reached after some time.
+
+<p>
+If a given CPU is offline or idle, <tt>synchronize_rcu_expedited()</tt>
+will ignore it because idle and offline CPUs are already residing
+in quiescent states.
+Otherwise, the expedited grace period will use
+<tt>smp_call_function_single()</tt> to send the CPU an IPI, which
+is handled by <tt>sync_rcu_exp_handler()</tt>.
+
+<p>
+However, because this is preemptible RCU, <tt>sync_rcu_exp_handler()</tt>
+can check to see if the CPU is currently running in an RCU read-side
+critical section.
+If not, the handler can immediately report a quiescent state.
+Otherwise, it sets flags so that the outermost <tt>rcu_read_unlock()</tt>
+invocation will provide the needed quiescent-state report.
+This flag-setting avoids the previous forced preemption of all
+CPUs that might have RCU read-side critical sections.
+In addition, this flag-setting is done so as to avoid increasing
+the overhead of the common-case fastpath through the scheduler.
+
+<p>
+Again because this is preemptible RCU, an RCU read-side critical section
+can be preempted.
+When that happens, RCU will enqueue the task, which will then continue to
+block the current expedited grace period until it resumes and finds its
+outermost <tt>rcu_read_unlock()</tt>.
+The CPU will report a quiescent state just after enqueuing the task because
+the CPU is no longer blocking the grace period.
+It is instead the preempted task doing the blocking.
+The list of blocked tasks is managed by <tt>rcu_preempt_ctxt_queue()</tt>,
+which is called from <tt>rcu_preempt_note_context_switch()</tt>, which
+in turn is called from <tt>rcu_note_context_switch()</tt>, which in
+turn is called from the scheduler.
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       Why not just have the expedited grace period check the
+       state of all the CPUs?
+       After all, that would avoid all those real-time-unfriendly IPIs.
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       Because we want the RCU read-side critical sections to run fast,
+       which means no memory barriers.
+       Therefore, it is not possible to safely check the state from some
+       other CPU.
+       And even if it was possible to safely check the state, it would
+       still be necessary to IPI the CPU to safely interact with the
+       upcoming <tt>rcu_read_unlock()</tt> invocation, which means that
+       the remote state testing would not help the worst-case
+       latency that real-time applications care about.
+
+       <p><font color="ffffff">One way to prevent your real-time
+       application from getting hit with these IPIs is to
+       build your kernel with <tt>CONFIG_NO_HZ_FULL=y</tt>.
+       RCU would then perceive the CPU running your application
+       as being idle, and it would be able to safely detect that
+       state without needing to IPI the CPU.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
+
+<p>
+Please note that this is just the overall flow:
+Additional complications can arise due to races with CPUs going idle
+or offline, among other things.
+
+<h2><a name="RCU-sched Expedited Grace Periods">
+RCU-sched Expedited Grace Periods</a></h2>
+
+<p>
+The overall flow of the handling of a given CPU by an RCU-sched
+expedited grace period is shown in the following diagram:
+
+<p><img src="ExpSchedFlow.svg" alt="ExpSchedFlow.svg" width="55%">
+
+<p>
+As with RCU-preempt's <tt>synchronize_rcu_expedited()</tt>,
+<tt>synchronize_sched_expedited()</tt> ignores offline and
+idle CPUs, again because they are in remotely detectable
+quiescent states.
+However, the <tt>synchronize_rcu_expedited()</tt> handler
+is <tt>sync_sched_exp_handler()</tt>, and because the
+<tt>rcu_read_lock_sched()</tt> and <tt>rcu_read_unlock_sched()</tt>
+leave no trace of their invocation, in general it is not possible to tell
+whether or not the current CPU is in an RCU read-side critical section.
+The best that <tt>sync_sched_exp_handler()</tt> can do is to check
+for idle, on the off-chance that the CPU went idle while the IPI
+was in flight.
+If the CPU is idle, then <tt>sync_sched_exp_handler()</tt> reports
+the quiescent state.
+
+<p>
+Otherwise, the handler invokes <tt>resched_cpu()</tt>, which forces
+a future context switch.
+At the time of the context switch, the CPU reports the quiescent state.
+Should the CPU go offline first, it will report the quiescent state
+at that time.
+
+<h2><a name="Expedited Grace Period and CPU Hotplug">
+Expedited Grace Period and CPU Hotplug</a></h2>
+
+<p>
+The expedited nature of expedited grace periods requires a much tighter
+interaction with CPU hotplug operations than is required for normal
+grace periods.
+In addition, attempting to IPI offline CPUs will result in splats, but
+failing to IPI online CPUs can result in too-short grace periods.
+Neither option is acceptable in production kernels.
+
+<p>
+The interaction between expedited grace periods and CPU hotplug operations
+is carried out at several levels:
+
+<ol>
+<li>   The number of CPUs that have ever been online is tracked
+       by the <tt>rcu_state</tt> structure's <tt>-&gt;ncpus</tt>
+       field.
+       The <tt>rcu_state</tt> structure's <tt>-&gt;ncpus_snap</tt>
+       field tracks the number of CPUs that have ever been online
+       at the beginning of an RCU expedited grace period.
+       Note that this number never decreases, at least in the absence
+       of a time machine.
+<li>   The identities of the CPUs that have ever been online are
+       tracked by the <tt>rcu_node</tt> structure's
+       <tt>-&gt;expmaskinitnext</tt> field.
+       The <tt>rcu_node</tt> structure's <tt>-&gt;expmaskinit</tt>
+       field tracks the identities of the CPUs that were online
+       at least once at the beginning of the most recent RCU
+       expedited grace period.
+       The <tt>rcu_state</tt> structure's <tt>-&gt;ncpus</tt> and
+       <tt>-&gt;ncpus_snap</tt> fields are used to detect when
+       new CPUs have come online for the first time, that is,
+       when the <tt>rcu_node</tt> structure's <tt>-&gt;expmaskinitnext</tt>
+       field has changed since the beginning of the last RCU
+       expedited grace period, which triggers an update of each
+       <tt>rcu_node</tt> structure's <tt>-&gt;expmaskinit</tt>
+       field from its <tt>-&gt;expmaskinitnext</tt> field.
+<li>   Each <tt>rcu_node</tt> structure's <tt>-&gt;expmaskinit</tt>
+       field is used to initialize that structure's
+       <tt>-&gt;expmask</tt> at the beginning of each RCU
+       expedited grace period.
+       This means that only those CPUs that have been online at least
+       once will be considered for a given grace period.
+<li>   Any CPU that goes offline will clear its bit in its leaf
+       <tt>rcu_node</tt> structure's <tt>-&gt;qsmaskinitnext</tt>
+       field, so any CPU with that bit clear can safely be ignored.
+       However, it is possible for a CPU coming online or going offline
+       to have this bit set for some time while <tt>cpu_online</tt>
+       returns <tt>false</tt>.
+<li>   For each non-idle CPU that RCU believes is currently online, the grace
+       period invokes <tt>smp_call_function_single()</tt>.
+       If this succeeds, the CPU was fully online.
+       Failure indicates that the CPU is in the process of coming online
+       or going offline, in which case it is necessary to wait for a
+       short time period and try again.
+       The purpose of this wait (or series of waits, as the case may be)
+       is to permit a concurrent CPU-hotplug operation to complete.
+<li>   In the case of RCU-sched, one of the last acts of an outgoing CPU
+       is to invoke <tt>rcu_report_dead()</tt>, which
+       reports a quiescent state for that CPU.
+       However, this is likely paranoia-induced redundancy. <!-- @@@ -->
+</ol>
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       Why all the dancing around with multiple counters and masks
+       tracking CPUs that were once online?
+       Why not just have a single set of masks tracking the currently
+       online CPUs and be done with it?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       Maintaining a single set of masks tracking the online CPUs <i>sounds</i>
+       easier, at least until you try working out all the race conditions
+       between grace-period initialization and CPU-hotplug operations.
+       For example, suppose initialization is progressing down the
+       tree while a CPU-offline operation is progressing up the tree.
+       This situation can result in bits set at the top of the tree
+       that have no counterparts at the bottom of the tree.
+       Those bits will never be cleared, which will result in
+       grace-period hangs.
+       In short, that way lies madness, to say nothing of a great many
+       bugs, hangs, and deadlocks.
+
+       <p><font color="ffffff">
+       In contrast, the current multi-mask multi-counter scheme ensures
+       that grace-period initialization will always see consistent masks
+       up and down the tree, which brings significant simplifications
+       over the single-mask method.
+
+       <p><font color="ffffff">
+       This is an instance of
+       <a href="http://www.cs.columbia.edu/~library/TR-repository/reports/reports-1992/cucs-039-92.ps.gz"><font color="ffffff">
+       deferring work in order to avoid synchronization</a>.
+       Lazily recording CPU-hotplug events at the beginning of the next
+       grace period greatly simplifies maintenance of the CPU-tracking
+       bitmasks in the <tt>rcu_node</tt> tree.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
+
+<h2><a name="Expedited Grace Period Refinements">
+Expedited Grace Period Refinements</a></h2>
+
+<ol>
+<li>   <a href="#Idle-CPU Checks">Idle-CPU checks</a>.
+<li>   <a href="#Batching via Sequence Counter">
+       Batching via sequence counter</a>.
+<li>   <a href="#Funnel Locking and Wait/Wakeup">
+       Funnel locking and wait/wakeup</a>.
+<li>   <a href="#Use of Workqueues">Use of Workqueues</a>.
+<li>   <a href="#Stall Warnings">Stall warnings</a>.
+</ol>
+
+<h3><a name="Idle-CPU Checks">Idle-CPU Checks</a></h3>
+
+<p>
+Each expedited grace period checks for idle CPUs when initially forming
+the mask of CPUs to be IPIed and again just before IPIing a CPU
+(both checks are carried out by <tt>sync_rcu_exp_select_cpus()</tt>).
+If the CPU is idle at any time between those two times, the CPU will
+not be IPIed.
+Instead, the task pushing the grace period forward will include the
+idle CPUs in the mask passed to <tt>rcu_report_exp_cpu_mult()</tt>.
+
+<p>
+For RCU-sched, there is an additional check for idle in the IPI
+handler, <tt>sync_sched_exp_handler()</tt>.
+If the IPI has interrupted the idle loop, then
+<tt>sync_sched_exp_handler()</tt> invokes <tt>rcu_report_exp_rdp()</tt>
+to report the corresponding quiescent state.
+
+<p>
+For RCU-preempt, there is no specific check for idle in the
+IPI handler (<tt>sync_rcu_exp_handler()</tt>), but because
+RCU read-side critical sections are not permitted within the
+idle loop, if <tt>sync_rcu_exp_handler()</tt> sees that the CPU is within
+an RCU read-side critical section, the CPU cannot possibly be idle.
+Otherwise, <tt>sync_rcu_exp_handler()</tt> invokes
+<tt>rcu_report_exp_rdp()</tt> to report the corresponding quiescent
+state, regardless of whether or not that quiescent state was due to
+the CPU being idle.
+
+<p>
+In summary, RCU expedited grace periods check for idle when building
+the bitmask of CPUs that must be IPIed, just before sending each IPI,
+and (either explicitly or implicitly) within the IPI handler.
+
+<h3><a name="Batching via Sequence Counter">
+Batching via Sequence Counter</a></h3>
+
+<p>
+If each grace-period request was carried out separately, expedited
+grace periods would have abysmal scalability and
+problematic high-load characteristics.
+Because each grace-period operation can serve an unlimited number of
+updates, it is important to <i>batch</i> requests, so that a single
+expedited grace-period operation will cover all requests in the
+corresponding batch.
+
+<p>
+This batching is controlled by a sequence counter named
+<tt>-&gt;expedited_sequence</tt> in the <tt>rcu_state</tt> structure.
+This counter has an odd value when there is an expedited grace period
+in progress and an even value otherwise, so that dividing the counter
+value by two gives the number of completed grace periods.
+During any given update request, the counter must transition from
+even to odd and then back to even, thus indicating that a grace
+period has elapsed.
+Therefore, if the initial value of the counter is <tt>s</tt>,
+the updater must wait until the counter reaches at least the
+value <tt>(s+3)&amp;~0x1</tt>.
+This counter is managed by the following access functions:
+
+<ol>
+<li>   <tt>rcu_exp_gp_seq_start()</tt>, which marks the start of
+       an expedited grace period.
+<li>   <tt>rcu_exp_gp_seq_end()</tt>, which marks the end of an
+       expedited grace period.
+<li>   <tt>rcu_exp_gp_seq_snap()</tt>, which obtains a snapshot of
+       the counter.
+<li>   <tt>rcu_exp_gp_seq_done()</tt>, which returns <tt>true</tt>
+       if a full expedited grace period has elapsed since the
+       corresponding call to <tt>rcu_exp_gp_seq_snap()</tt>.
+</ol>
+
+<p>
+Again, only one request in a given batch need actually carry out
+a grace-period operation, which means there must be an efficient
+way to identify which of many concurrent requests will initiate
+the grace period, and that there be an efficient way for the
+remaining requests to wait for that grace period to complete.
+However, that is the topic of the next section.
+
+<h3><a name="Funnel Locking and Wait/Wakeup">
+Funnel Locking and Wait/Wakeup</a></h3>
+
+<p>
+The natural way to sort out which of a batch of updaters will initiate
+the expedited grace period is to use the <tt>rcu_node</tt> combining
+tree, as implemented by the <tt>exp_funnel_lock()</tt> function.
+The first updater corresponding to a given grace period arriving
+at a given <tt>rcu_node</tt> structure records its desired grace-period
+sequence number in the <tt>-&gt;exp_seq_rq</tt> field and moves up
+to the next level in the tree.
+Otherwise, if the <tt>-&gt;exp_seq_rq</tt> field already contains
+the sequence number for the desired grace period or some later one,
+the updater blocks on one of four wait queues in the
+<tt>-&gt;exp_wq[]</tt> array, using the second-from-bottom
+and third-from bottom bits as an index.
+An <tt>-&gt;exp_lock</tt> field in the <tt>rcu_node</tt> structure
+synchronizes access to these fields.
+
+<p>
+An empty <tt>rcu_node</tt> tree is shown in the following diagram,
+with the white cells representing the <tt>-&gt;exp_seq_rq</tt> field
+and the red cells representing the elements of the
+<tt>-&gt;exp_wq[]</tt> array.
+
+<p><img src="Funnel0.svg" alt="Funnel0.svg" width="75%">
+
+<p>
+The next diagram shows the situation after the arrival of Task&nbsp;A
+and Task&nbsp;B at the leftmost and rightmost leaf <tt>rcu_node</tt>
+structures, respectively.
+The current value of the <tt>rcu_state</tt> structure's
+<tt>-&gt;expedited_sequence</tt> field is zero, so adding three and
+clearing the bottom bit results in the value two, which both tasks
+record in the <tt>-&gt;exp_seq_rq</tt> field of their respective
+<tt>rcu_node</tt> structures:
+
+<p><img src="Funnel1.svg" alt="Funnel1.svg" width="75%">
+
+<p>
+Each of Tasks&nbsp;A and&nbsp;B will move up to the root
+<tt>rcu_node</tt> structure.
+Suppose that Task&nbsp;A wins, recording its desired grace-period sequence
+number and resulting in the state shown below:
+
+<p><img src="Funnel2.svg" alt="Funnel2.svg" width="75%">
+
+<p>
+Task&nbsp;A now advances to initiate a new grace period, while Task&nbsp;B
+moves up to the root <tt>rcu_node</tt> structure, and, seeing that
+its desired sequence number is already recorded, blocks on
+<tt>-&gt;exp_wq[1]</tt>.
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       Why <tt>-&gt;exp_wq[1]</tt>?
+       Given that the value of these tasks' desired sequence number is
+       two, shouldn't they instead block on <tt>-&gt;exp_wq[2]</tt>?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       No.
+
+       <p><font color="ffffff">
+       Recall that the bottom bit of the desired sequence number indicates
+       whether or not a grace period is currently in progress.
+       It is therefore necessary to shift the sequence number right one
+       bit position to obtain the number of the grace period.
+       This results in <tt>-&gt;exp_wq[1]</tt>.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
+
+<p>
+If Tasks&nbsp;C and&nbsp;D also arrive at this point, they will compute the
+same desired grace-period sequence number, and see that both leaf
+<tt>rcu_node</tt> structures already have that value recorded.
+They will therefore block on their respective <tt>rcu_node</tt>
+structures' <tt>-&gt;exp_wq[1]</tt> fields, as shown below:
+
+<p><img src="Funnel3.svg" alt="Funnel3.svg" width="75%">
+
+<p>
+Task&nbsp;A now acquires the <tt>rcu_state</tt> structure's
+<tt>-&gt;exp_mutex</tt> and initiates the grace period, which
+increments <tt>-&gt;expedited_sequence</tt>.
+Therefore, if Tasks&nbsp;E and&nbsp;F arrive, they will compute
+a desired sequence number of 4 and will record this value as
+shown below:
+
+<p><img src="Funnel4.svg" alt="Funnel4.svg" width="75%">
+
+<p>
+Tasks&nbsp;E and&nbsp;F will propagate up the <tt>rcu_node</tt>
+combining tree, with Task&nbsp;F blocking on the root <tt>rcu_node</tt>
+structure and Task&nbsp;E waiting for Task&nbsp;A to finish so that
+it can start the next grace period.
+The resulting state is as shown below:
+
+<p><img src="Funnel5.svg" alt="Funnel5.svg" width="75%">
+
+<p>
+Once the grace period completes, Task&nbsp;A
+starts waking up the tasks waiting for this grace period to complete,
+increments the <tt>-&gt;expedited_sequence</tt>,
+acquires the <tt>-&gt;exp_wake_mutex</tt> and then releases the
+<tt>-&gt;exp_mutex</tt>.
+This results in the following state:
+
+<p><img src="Funnel6.svg" alt="Funnel6.svg" width="75%">
+
+<p>
+Task&nbsp;E can then acquire <tt>-&gt;exp_mutex</tt> and increment
+<tt>-&gt;expedited_sequence</tt> to the value three.
+If new tasks&nbsp;G and&nbsp;H arrive and move up the combining tree at the
+same time, the state will be as follows:
+
+<p><img src="Funnel7.svg" alt="Funnel7.svg" width="75%">
+
+<p>
+Note that three of the root <tt>rcu_node</tt> structure's
+waitqueues are now occupied.
+However, at some point, Task&nbsp;A will wake up the
+tasks blocked on the <tt>-&gt;exp_wq</tt> waitqueues, resulting
+in the following state:
+
+<p><img src="Funnel8.svg" alt="Funnel8.svg" width="75%">
+
+<p>
+Execution will continue with Tasks&nbsp;E and&nbsp;H completing
+their grace periods and carrying out their wakeups.
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       What happens if Task&nbsp;A takes so long to do its wakeups
+       that Task&nbsp;E's grace period completes?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       Then Task&nbsp;E will block on the <tt>-&gt;exp_wake_mutex</tt>,
+       which will also prevent it from releasing <tt>-&gt;exp_mutex</tt>,
+       which in turn will prevent the next grace period from starting.
+       This last is important in preventing overflow of the
+       <tt>-&gt;exp_wq[]</tt> array.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
+
+<h3><a name="Use of Workqueues">Use of Workqueues</a></h3>
+
+<p>
+In earlier implementations, the task requesting the expedited
+grace period also drove it to completion.
+This straightforward approach had the disadvantage of needing to
+account for signals sent to user tasks,
+so more recent implementations use the Linux kernel's
+<a href="https://www.kernel.org/doc/Documentation/workqueue.txt">workqueues</a>.
+
+<p>
+The requesting task still does counter snapshotting and funnel-lock
+processing, but the task reaching the top of the funnel lock
+does a <tt>schedule_work()</tt> (from <tt>_synchronize_rcu_expedited()</tt>)
+so that a workqueue kthread does the actual grace-period processing.
+Because workqueue kthreads do not accept signals, grace-period-wait
+processing need not allow for signals.
+
+In addition, this approach allows wakeups for the previous expedited
+grace period to be overlapped with processing for the next expedited
+grace period.
+Because there are only four sets of waitqueues, it is necessary to
+ensure that the previous grace period's wakeups complete before the
+next grace period's wakeups start.
+This is handled by having the <tt>-&gt;exp_mutex</tt>
+guard expedited grace-period processing and the
+<tt>-&gt;exp_wake_mutex</tt> guard wakeups.
+The key point is that the <tt>-&gt;exp_mutex</tt> is not released
+until the first wakeup is complete, which means that the
+<tt>-&gt;exp_wake_mutex</tt> has already been acquired at that point.
+This approach ensures that the previous grace period's wakeups can
+be carried out while the current grace period is in process, but
+that these wakeups will complete before the next grace period starts.
+This means that only three waitqueues are required, guaranteeing that
+the four that are provided are sufficient.
+
+<h3><a name="Stall Warnings">Stall Warnings</a></h3>
+
+<p>
+Expediting grace periods does nothing to speed things up when RCU
+readers take too long, and therefore expedited grace periods check
+for stalls just as normal grace periods do.
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       But why not just let the normal grace-period machinery
+       detect the stalls, given that a given reader must block
+       both normal and expedited grace periods?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       Because it is quite possible that at a given time there
+       is no normal grace period in progress, in which case the
+       normal grace period cannot emit a stall warning.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
+
+The <tt>synchronize_sched_expedited_wait()</tt> function loops waiting
+for the expedited grace period to end, but with a timeout set to the
+current RCU CPU stall-warning time.
+If this time is exceeded, any CPUs or <tt>rcu_node</tt> structures
+blocking the current grace period are printed.
+Each stall warning results in another pass through the loop, but the
+second and subsequent passes use longer stall times.
+
+<h3><a name="Summary">
+Summary</a></h3>
+
+<p>
+Expedited grace periods use a sequence-number approach to promote
+batching, so that a single grace-period operation can serve numerous
+requests.
+A funnel lock is used to efficiently identify the one task out of
+a concurrent group that will request the grace period.
+All members of the group will block on waitqueues provided in
+the <tt>rcu_node</tt> structure.
+The actual grace-period processing is carried out by a workqueue.
+
+<p>
+CPU-hotplug operations are noted lazily in order to prevent the need
+for tight synchronization between expedited grace periods and
+CPU-hotplug operations.
+The dyntick-idle counters are used to avoid sending IPIs to idle CPUs,
+at least in the common case.
+RCU-preempt and RCU-sched use different IPI handlers and different
+code to respond to the state changes carried out by those handlers,
+but otherwise use common code.
+
+<p>
+Quiescent states are tracked using the <tt>rcu_node</tt> tree,
+and once all necessary quiescent states have been reported,
+all tasks waiting on this expedited grace period are awakened.
+A pair of mutexes are used to allow one grace period's wakeups
+to proceed concurrently with the next grace period's processing.
+
+<p>
+This combination of mechanisms allows expedited grace periods to
+run reasonably efficiently.
+However, for non-time-critical tasks, normal grace periods should be
+used instead because their longer duration permits much higher
+degrees of batching, and thus much lower per-request overheads.
+
+</body></html>
diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/Funnel0.svg b/Documentation/RCU/Design/Expedited-Grace-Periods/Funnel0.svg
new file mode 100644 (file)
index 0000000..98af665
--- /dev/null
@@ -0,0 +1,275 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="490.05093"
+   height="125.78741"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="Funnel0.svg">
+  <defs
+     id="defs4">
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend"
+       style="overflow:visible">
+      <path
+         id="path3792"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lstart"
+       style="overflow:visible">
+      <path
+         id="path3789"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(1.1,0,0,1.1,1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lstart-4"
+       style="overflow:visible">
+      <path
+         id="path3789-9"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(1.1,0,0,1.1,1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-4"
+       style="overflow:visible">
+      <path
+         id="path3792-4"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="1.3670394"
+     inkscape:cx="201.06495"
+     inkscape:cy="-86.548414"
+     inkscape:document-units="px"
+     inkscape:current-layer="layer1"
+     showgrid="false"
+     inkscape:window-width="1351"
+     inkscape:window-height="836"
+     inkscape:window-x="171"
+     inkscape:window-y="279"
+     inkscape:window-maximized="0"
+     fit-margin-top="5"
+     fit-margin-left="5"
+     fit-margin-right="5"
+     fit-margin-bottom="5" />
+  <metadata
+     id="metadata7">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title />
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-117.08462,-249.92053)">
+    <flowRoot
+       xml:space="preserve"
+       id="flowRoot2985"
+       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Symbol;-inkscape-font-specification:Symbol"><flowRegion
+         id="flowRegion2987"><rect
+           id="rect2989"
+           width="82.85714"
+           height="11.428572"
+           x="240"
+           y="492.36218" /></flowRegion><flowPara
+         id="flowPara2991" /></flowRoot>    <text
+       xml:space="preserve"
+       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Symbol;-inkscape-font-specification:Symbol"
+       x="362.371"
+       y="262.51819"
+       id="text4441"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan4443"
+         x="362.371"
+         y="262.51819">-&gt;expedited_sequence: 0</tspan></text>
+    <rect
+       style="fill:none;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+       id="rect3101"
+       width="43.158947"
+       height="26.33428"
+       x="253.55223"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3"
+       width="43.158947"
+       height="26.33428"
+       x="297.04141"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3-6"
+       width="43.158947"
+       height="26.33428"
+       x="427.509"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3-6-7"
+       width="43.158947"
+       height="26.33428"
+       x="384.01981"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3-6-7-5"
+       width="43.158947"
+       height="26.33428"
+       x="340.53061"
+       y="275.07489" />
+    <g
+       id="g3997"
+       transform="translate(-0.87295532,0)">
+      <rect
+         y="343.37366"
+         x="123.95757"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-35"
+         style="fill:none;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="167.44673"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-62"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="297.91437"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-9"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="254.42516"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-1"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="210.93593"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-5-2"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <text
+         xml:space="preserve"
+         style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         x="145.45404"
+         y="360.25174"
+         id="text3013"
+         sodipodi:linespacing="125%"><tspan
+           sodipodi:role="line"
+           id="tspan3015"
+           x="145.45404"
+           y="360.25174"
+           style="font-size:10px">:0</tspan></text>
+    </g>
+    <g
+       id="g3997-7"
+       transform="translate(260.06223,0)">
+      <rect
+         y="343.37366"
+         x="123.95757"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-35-0"
+         style="fill:none;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="167.44673"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-62-9"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="297.91437"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-9-3"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="254.42516"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-1-6"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="210.93593"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-5-2-0"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <text
+         xml:space="preserve"
+         style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         x="145.45404"
+         y="360.25174"
+         id="text3013-3"
+         sodipodi:linespacing="125%"><tspan
+           sodipodi:role="line"
+           id="tspan3015-6"
+           x="145.45404"
+           y="360.25174"
+           style="font-size:10px">:0</tspan></text>
+    </g>
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/Funnel1.svg b/Documentation/RCU/Design/Expedited-Grace-Periods/Funnel1.svg
new file mode 100644 (file)
index 0000000..e0184a3
--- /dev/null
@@ -0,0 +1,275 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="490.05093"
+   height="125.78741"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="Funnel1.svg">
+  <defs
+     id="defs4">
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend"
+       style="overflow:visible">
+      <path
+         id="path3792"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lstart"
+       style="overflow:visible">
+      <path
+         id="path3789"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(1.1,0,0,1.1,1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lstart-4"
+       style="overflow:visible">
+      <path
+         id="path3789-9"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(1.1,0,0,1.1,1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-4"
+       style="overflow:visible">
+      <path
+         id="path3792-4"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="1.3670394"
+     inkscape:cx="201.06495"
+     inkscape:cy="-86.548414"
+     inkscape:document-units="px"
+     inkscape:current-layer="g3997-7"
+     showgrid="false"
+     inkscape:window-width="1351"
+     inkscape:window-height="836"
+     inkscape:window-x="363"
+     inkscape:window-y="336"
+     inkscape:window-maximized="0"
+     fit-margin-top="5"
+     fit-margin-left="5"
+     fit-margin-right="5"
+     fit-margin-bottom="5" />
+  <metadata
+     id="metadata7">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title />
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-117.08462,-249.92053)">
+    <flowRoot
+       xml:space="preserve"
+       id="flowRoot2985"
+       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Symbol;-inkscape-font-specification:Symbol"><flowRegion
+         id="flowRegion2987"><rect
+           id="rect2989"
+           width="82.85714"
+           height="11.428572"
+           x="240"
+           y="492.36218" /></flowRegion><flowPara
+         id="flowPara2991" /></flowRoot>    <text
+       xml:space="preserve"
+       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Symbol;-inkscape-font-specification:Symbol"
+       x="362.371"
+       y="262.51819"
+       id="text4441"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan4443"
+         x="362.371"
+         y="262.51819">-&gt;expedited_sequence: 0</tspan></text>
+    <rect
+       style="fill:none;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+       id="rect3101"
+       width="43.158947"
+       height="26.33428"
+       x="253.55223"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3"
+       width="43.158947"
+       height="26.33428"
+       x="297.04141"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3-6"
+       width="43.158947"
+       height="26.33428"
+       x="427.509"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3-6-7"
+       width="43.158947"
+       height="26.33428"
+       x="384.01981"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3-6-7-5"
+       width="43.158947"
+       height="26.33428"
+       x="340.53061"
+       y="275.07489" />
+    <g
+       id="g3997"
+       transform="translate(-0.87295532,0)">
+      <rect
+         y="343.37366"
+         x="123.95757"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-35"
+         style="fill:none;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="167.44673"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-62"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="297.91437"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-9"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="254.42516"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-1"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="210.93593"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-5-2"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <text
+         xml:space="preserve"
+         style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         x="146.00092"
+         y="360.25174"
+         id="text3013"
+         sodipodi:linespacing="125%"><tspan
+           sodipodi:role="line"
+           id="tspan3015"
+           x="146.00092"
+           y="360.25174"
+           style="font-size:10px">A:2</tspan></text>
+    </g>
+    <g
+       id="g3997-7"
+       transform="translate(260.06223,0)">
+      <rect
+         y="343.37366"
+         x="123.95757"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-35-0"
+         style="fill:none;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="167.44673"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-62-9"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="297.91437"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-9-3"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="254.42516"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-1-6"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="210.93593"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-5-2-0"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <text
+         xml:space="preserve"
+         style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         x="145.54926"
+         y="360.25174"
+         id="text3013-3"
+         sodipodi:linespacing="125%"><tspan
+           sodipodi:role="line"
+           id="tspan3015-6"
+           x="145.54926"
+           y="360.25174"
+           style="font-size:10px">B:2</tspan></text>
+    </g>
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/Funnel2.svg b/Documentation/RCU/Design/Expedited-Grace-Periods/Funnel2.svg
new file mode 100644 (file)
index 0000000..1bc3fed
--- /dev/null
@@ -0,0 +1,287 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="490.05093"
+   height="125.78741"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="Funnel2.svg">
+  <defs
+     id="defs4">
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend"
+       style="overflow:visible">
+      <path
+         id="path3792"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lstart"
+       style="overflow:visible">
+      <path
+         id="path3789"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(1.1,0,0,1.1,1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lstart-4"
+       style="overflow:visible">
+      <path
+         id="path3789-9"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(1.1,0,0,1.1,1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-4"
+       style="overflow:visible">
+      <path
+         id="path3792-4"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="1.3670394"
+     inkscape:cx="114.01552"
+     inkscape:cy="-86.548414"
+     inkscape:document-units="px"
+     inkscape:current-layer="g3997-7"
+     showgrid="false"
+     inkscape:window-width="1351"
+     inkscape:window-height="836"
+     inkscape:window-x="363"
+     inkscape:window-y="336"
+     inkscape:window-maximized="0"
+     fit-margin-top="5"
+     fit-margin-left="5"
+     fit-margin-right="5"
+     fit-margin-bottom="5" />
+  <metadata
+     id="metadata7">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title />
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-117.08462,-249.92053)">
+    <flowRoot
+       xml:space="preserve"
+       id="flowRoot2985"
+       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Symbol;-inkscape-font-specification:Symbol"><flowRegion
+         id="flowRegion2987"><rect
+           id="rect2989"
+           width="82.85714"
+           height="11.428572"
+           x="240"
+           y="492.36218" /></flowRegion><flowPara
+         id="flowPara2991" /></flowRoot>    <text
+       xml:space="preserve"
+       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Symbol;-inkscape-font-specification:Symbol"
+       x="362.371"
+       y="262.51819"
+       id="text4441"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan4443"
+         x="362.371"
+         y="262.51819">-&gt;expedited_sequence: 0</tspan></text>
+    <rect
+       style="fill:none;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+       id="rect3101"
+       width="43.158947"
+       height="26.33428"
+       x="253.55223"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3"
+       width="43.158947"
+       height="26.33428"
+       x="297.04141"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3-6"
+       width="43.158947"
+       height="26.33428"
+       x="427.509"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3-6-7"
+       width="43.158947"
+       height="26.33428"
+       x="384.01981"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3-6-7-5"
+       width="43.158947"
+       height="26.33428"
+       x="340.53061"
+       y="275.07489" />
+    <g
+       id="g3997"
+       transform="translate(-0.87295532,0)">
+      <rect
+         y="343.37366"
+         x="123.95757"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-35"
+         style="fill:none;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="167.44673"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-62"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="297.91437"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-9"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="254.42516"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-1"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="210.93593"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-5-2"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <text
+         xml:space="preserve"
+         style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         x="146.00092"
+         y="360.25174"
+         id="text3013"
+         sodipodi:linespacing="125%"><tspan
+           sodipodi:role="line"
+           id="tspan3015"
+           x="146.00092"
+           y="360.25174"
+           style="font-size:10px">:2</tspan></text>
+    </g>
+    <g
+       id="g3997-7"
+       transform="translate(260.06223,0)">
+      <rect
+         y="343.37366"
+         x="123.95757"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-35-0"
+         style="fill:none;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="167.44673"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-62-9"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="297.91437"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-9-3"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="254.42516"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-1-6"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="210.93593"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-5-2-0"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <text
+         xml:space="preserve"
+         style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         x="145.54926"
+         y="360.25174"
+         id="text3013-3"
+         sodipodi:linespacing="125%"><tspan
+           sodipodi:role="line"
+           id="tspan3015-6"
+           x="145.54926"
+           y="360.25174"
+           style="font-size:10px">B:2</tspan></text>
+    </g>
+    <text
+       xml:space="preserve"
+       style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       x="275.59558"
+       y="291.95297"
+       id="text3013-36"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan3015-7"
+         x="275.59558"
+         y="291.95297"
+         style="font-size:10px">A:2</tspan></text>
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/Funnel3.svg b/Documentation/RCU/Design/Expedited-Grace-Periods/Funnel3.svg
new file mode 100644 (file)
index 0000000..6d8a1bf
--- /dev/null
@@ -0,0 +1,323 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="490.05093"
+   height="125.78741"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="Funnel3.svg">
+  <defs
+     id="defs4">
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend"
+       style="overflow:visible">
+      <path
+         id="path3792"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lstart"
+       style="overflow:visible">
+      <path
+         id="path3789"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(1.1,0,0,1.1,1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lstart-4"
+       style="overflow:visible">
+      <path
+         id="path3789-9"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(1.1,0,0,1.1,1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-4"
+       style="overflow:visible">
+      <path
+         id="path3792-4"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="1.3670394"
+     inkscape:cx="114.01552"
+     inkscape:cy="-86.548414"
+     inkscape:document-units="px"
+     inkscape:current-layer="layer1"
+     showgrid="false"
+     inkscape:window-width="1351"
+     inkscape:window-height="836"
+     inkscape:window-x="68"
+     inkscape:window-y="180"
+     inkscape:window-maximized="0"
+     fit-margin-top="5"
+     fit-margin-left="5"
+     fit-margin-right="5"
+     fit-margin-bottom="5" />
+  <metadata
+     id="metadata7">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title />
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-117.08462,-249.92053)">
+    <flowRoot
+       xml:space="preserve"
+       id="flowRoot2985"
+       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Symbol;-inkscape-font-specification:Symbol"><flowRegion
+         id="flowRegion2987"><rect
+           id="rect2989"
+           width="82.85714"
+           height="11.428572"
+           x="240"
+           y="492.36218" /></flowRegion><flowPara
+         id="flowPara2991" /></flowRoot>    <text
+       xml:space="preserve"
+       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Symbol;-inkscape-font-specification:Symbol"
+       x="362.371"
+       y="262.51819"
+       id="text4441"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan4443"
+         x="362.371"
+         y="262.51819">-&gt;expedited_sequence: 0  GP: A</tspan></text>
+    <rect
+       style="fill:none;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+       id="rect3101"
+       width="43.158947"
+       height="26.33428"
+       x="253.55223"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3"
+       width="43.158947"
+       height="26.33428"
+       x="297.04141"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3-6"
+       width="43.158947"
+       height="26.33428"
+       x="427.509"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3-6-7"
+       width="43.158947"
+       height="26.33428"
+       x="384.01981"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3-6-7-5"
+       width="43.158947"
+       height="26.33428"
+       x="340.53061"
+       y="275.07489" />
+    <g
+       id="g3997"
+       transform="translate(-0.87295532,0)">
+      <rect
+         y="343.37366"
+         x="123.95757"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-35"
+         style="fill:none;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="167.44673"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-62"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="297.91437"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-9"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="254.42516"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-1"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="210.93593"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-5-2"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <text
+         xml:space="preserve"
+         style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         x="146.00092"
+         y="360.25174"
+         id="text3013"
+         sodipodi:linespacing="125%"><tspan
+           sodipodi:role="line"
+           id="tspan3015"
+           x="146.00092"
+           y="360.25174"
+           style="font-size:10px">:2</tspan></text>
+      <text
+         xml:space="preserve"
+         style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         x="232.51051"
+         y="360.18094"
+         id="text3013-3-3"
+         sodipodi:linespacing="125%"><tspan
+           sodipodi:role="line"
+           id="tspan3015-6-6"
+           x="232.51051"
+           y="360.18094"
+           style="font-size:10px">C</tspan></text>
+    </g>
+    <g
+       id="g3019"
+       transform="translate(260.06223,0)">
+      <rect
+         y="343.37366"
+         x="123.95757"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-35-0"
+         style="fill:none;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="167.44673"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-62-9"
+         style="fill:#ff8282;fill-opacity:1;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="297.91437"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-9-3"
+         style="fill:#ff8282;fill-opacity:1;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="254.42516"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-1-6"
+         style="fill:#ff8282;fill-opacity:1;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="210.93593"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-5-2-0"
+         style="fill:#ff8282;fill-opacity:1;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <text
+         xml:space="preserve"
+         style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         x="145.54926"
+         y="360.25174"
+         id="text3013-3"
+         sodipodi:linespacing="125%"><tspan
+           sodipodi:role="line"
+           id="tspan3015-6"
+           x="145.54926"
+           y="360.25174"
+           style="font-size:10px">:2</tspan></text>
+      <text
+         xml:space="preserve"
+         style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         x="232.31764"
+         y="360.18582"
+         id="text3013-3-3-7"
+         sodipodi:linespacing="125%"><tspan
+           sodipodi:role="line"
+           id="tspan3015-6-6-5"
+           x="232.31764"
+           y="360.18582"
+           style="font-size:10px">D</tspan></text>
+    </g>
+    <text
+       xml:space="preserve"
+       style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       x="275.59558"
+       y="291.95297"
+       id="text3013-36"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan3015-7"
+         x="275.59558"
+         y="291.95297"
+         style="font-size:10px">:2</tspan></text>
+    <text
+       xml:space="preserve"
+       style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       x="361.97092"
+       y="291.88705"
+       id="text3013-3-36"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan3015-6-7"
+         x="361.97092"
+         y="291.88705"
+         style="font-size:10px">B</tspan></text>
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/Funnel4.svg b/Documentation/RCU/Design/Expedited-Grace-Periods/Funnel4.svg
new file mode 100644 (file)
index 0000000..44018fd
--- /dev/null
@@ -0,0 +1,323 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="490.05093"
+   height="125.78741"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="Funnel4.svg">
+  <defs
+     id="defs4">
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend"
+       style="overflow:visible">
+      <path
+         id="path3792"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lstart"
+       style="overflow:visible">
+      <path
+         id="path3789"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(1.1,0,0,1.1,1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lstart-4"
+       style="overflow:visible">
+      <path
+         id="path3789-9"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(1.1,0,0,1.1,1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-4"
+       style="overflow:visible">
+      <path
+         id="path3792-4"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="1.3670394"
+     inkscape:cx="114.01552"
+     inkscape:cy="-86.548414"
+     inkscape:document-units="px"
+     inkscape:current-layer="layer1"
+     showgrid="false"
+     inkscape:window-width="1351"
+     inkscape:window-height="836"
+     inkscape:window-x="68"
+     inkscape:window-y="180"
+     inkscape:window-maximized="0"
+     fit-margin-top="5"
+     fit-margin-left="5"
+     fit-margin-right="5"
+     fit-margin-bottom="5" />
+  <metadata
+     id="metadata7">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title />
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-117.08462,-249.92053)">
+    <flowRoot
+       xml:space="preserve"
+       id="flowRoot2985"
+       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Symbol;-inkscape-font-specification:Symbol"><flowRegion
+         id="flowRegion2987"><rect
+           id="rect2989"
+           width="82.85714"
+           height="11.428572"
+           x="240"
+           y="492.36218" /></flowRegion><flowPara
+         id="flowPara2991" /></flowRoot>    <text
+       xml:space="preserve"
+       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Symbol;-inkscape-font-specification:Symbol"
+       x="362.371"
+       y="262.51819"
+       id="text4441"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan4443"
+         x="362.371"
+         y="262.51819">-&gt;expedited_sequence: 1  GP: A</tspan></text>
+    <rect
+       style="fill:none;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+       id="rect3101"
+       width="43.158947"
+       height="26.33428"
+       x="253.55223"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3"
+       width="43.158947"
+       height="26.33428"
+       x="297.04141"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3-6"
+       width="43.158947"
+       height="26.33428"
+       x="427.509"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3-6-7"
+       width="43.158947"
+       height="26.33428"
+       x="384.01981"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3-6-7-5"
+       width="43.158947"
+       height="26.33428"
+       x="340.53061"
+       y="275.07489" />
+    <g
+       id="g3997"
+       transform="translate(-0.87295532,0)">
+      <rect
+         y="343.37366"
+         x="123.95757"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-35"
+         style="fill:none;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="167.44673"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-62"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="297.91437"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-9"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="254.42516"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-1"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="210.93593"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-5-2"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <text
+         xml:space="preserve"
+         style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         x="146.00092"
+         y="360.25174"
+         id="text3013"
+         sodipodi:linespacing="125%"><tspan
+           sodipodi:role="line"
+           id="tspan3015"
+           x="146.00092"
+           y="360.25174"
+           style="font-size:10px">E:4</tspan></text>
+      <text
+         xml:space="preserve"
+         style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         x="232.51051"
+         y="360.18094"
+         id="text3013-3-3"
+         sodipodi:linespacing="125%"><tspan
+           sodipodi:role="line"
+           id="tspan3015-6-6"
+           x="232.51051"
+           y="360.18094"
+           style="font-size:10px">C</tspan></text>
+    </g>
+    <g
+       id="g3019"
+       transform="translate(260.06223,0)">
+      <rect
+         y="343.37366"
+         x="123.95757"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-35-0"
+         style="fill:none;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="167.44673"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-62-9"
+         style="fill:#ff8282;fill-opacity:1;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="297.91437"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-9-3"
+         style="fill:#ff8282;fill-opacity:1;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="254.42516"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-1-6"
+         style="fill:#ff8282;fill-opacity:1;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="210.93593"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-5-2-0"
+         style="fill:#ff8282;fill-opacity:1;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <text
+         xml:space="preserve"
+         style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         x="145.54926"
+         y="360.25174"
+         id="text3013-3"
+         sodipodi:linespacing="125%"><tspan
+           sodipodi:role="line"
+           id="tspan3015-6"
+           x="145.54926"
+           y="360.25174"
+           style="font-size:10px">F:4</tspan></text>
+      <text
+         xml:space="preserve"
+         style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         x="232.31764"
+         y="360.18582"
+         id="text3013-3-3-7"
+         sodipodi:linespacing="125%"><tspan
+           sodipodi:role="line"
+           id="tspan3015-6-6-5"
+           x="232.31764"
+           y="360.18582"
+           style="font-size:10px">D</tspan></text>
+    </g>
+    <text
+       xml:space="preserve"
+       style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       x="275.59558"
+       y="291.95297"
+       id="text3013-36"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan3015-7"
+         x="275.59558"
+         y="291.95297"
+         style="font-size:10px">:2</tspan></text>
+    <text
+       xml:space="preserve"
+       style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       x="361.97092"
+       y="291.88705"
+       id="text3013-3-36"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan3015-6-7"
+         x="361.97092"
+         y="291.88705"
+         style="font-size:10px">B</tspan></text>
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/Funnel5.svg b/Documentation/RCU/Design/Expedited-Grace-Periods/Funnel5.svg
new file mode 100644 (file)
index 0000000..e5eef50
--- /dev/null
@@ -0,0 +1,335 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="490.05093"
+   height="125.78741"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="Funnel5.svg">
+  <defs
+     id="defs4">
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend"
+       style="overflow:visible">
+      <path
+         id="path3792"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lstart"
+       style="overflow:visible">
+      <path
+         id="path3789"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(1.1,0,0,1.1,1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lstart-4"
+       style="overflow:visible">
+      <path
+         id="path3789-9"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(1.1,0,0,1.1,1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-4"
+       style="overflow:visible">
+      <path
+         id="path3792-4"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="1.3670394"
+     inkscape:cx="114.01552"
+     inkscape:cy="-86.548414"
+     inkscape:document-units="px"
+     inkscape:current-layer="layer1"
+     showgrid="false"
+     inkscape:window-width="1351"
+     inkscape:window-height="836"
+     inkscape:window-x="68"
+     inkscape:window-y="180"
+     inkscape:window-maximized="0"
+     fit-margin-top="5"
+     fit-margin-left="5"
+     fit-margin-right="5"
+     fit-margin-bottom="5" />
+  <metadata
+     id="metadata7">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title />
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-117.08462,-249.92053)">
+    <flowRoot
+       xml:space="preserve"
+       id="flowRoot2985"
+       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Symbol;-inkscape-font-specification:Symbol"><flowRegion
+         id="flowRegion2987"><rect
+           id="rect2989"
+           width="82.85714"
+           height="11.428572"
+           x="240"
+           y="492.36218" /></flowRegion><flowPara
+         id="flowPara2991" /></flowRoot>    <text
+       xml:space="preserve"
+       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Symbol;-inkscape-font-specification:Symbol"
+       x="362.371"
+       y="262.51819"
+       id="text4441"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan4443"
+         x="362.371"
+         y="262.51819">-&gt;expedited_sequence: 1  GP: A,E</tspan></text>
+    <rect
+       style="fill:none;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+       id="rect3101"
+       width="43.158947"
+       height="26.33428"
+       x="253.55223"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3"
+       width="43.158947"
+       height="26.33428"
+       x="297.04141"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3-6"
+       width="43.158947"
+       height="26.33428"
+       x="427.509"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3-6-7"
+       width="43.158947"
+       height="26.33428"
+       x="384.01981"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3-6-7-5"
+       width="43.158947"
+       height="26.33428"
+       x="340.53061"
+       y="275.07489" />
+    <g
+       id="g3997"
+       transform="translate(-0.87295532,0)">
+      <rect
+         y="343.37366"
+         x="123.95757"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-35"
+         style="fill:none;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="167.44673"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-62"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="297.91437"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-9"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="254.42516"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-1"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="210.93593"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-5-2"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <text
+         xml:space="preserve"
+         style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         x="146.00092"
+         y="360.25174"
+         id="text3013"
+         sodipodi:linespacing="125%"><tspan
+           sodipodi:role="line"
+           id="tspan3015"
+           x="146.00092"
+           y="360.25174"
+           style="font-size:10px">:4</tspan></text>
+      <text
+         xml:space="preserve"
+         style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         x="232.51051"
+         y="360.18094"
+         id="text3013-3-3"
+         sodipodi:linespacing="125%"><tspan
+           sodipodi:role="line"
+           id="tspan3015-6-6"
+           x="232.51051"
+           y="360.18094"
+           style="font-size:10px">C</tspan></text>
+    </g>
+    <g
+       id="g3019"
+       transform="translate(260.06223,0)">
+      <rect
+         y="343.37366"
+         x="123.95757"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-35-0"
+         style="fill:none;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="167.44673"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-62-9"
+         style="fill:#ff8282;fill-opacity:1;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="297.91437"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-9-3"
+         style="fill:#ff8282;fill-opacity:1;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="254.42516"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-1-6"
+         style="fill:#ff8282;fill-opacity:1;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="210.93593"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-5-2-0"
+         style="fill:#ff8282;fill-opacity:1;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <text
+         xml:space="preserve"
+         style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         x="145.54926"
+         y="360.25174"
+         id="text3013-3"
+         sodipodi:linespacing="125%"><tspan
+           sodipodi:role="line"
+           id="tspan3015-6"
+           x="145.54926"
+           y="360.25174"
+           style="font-size:10px">:4</tspan></text>
+      <text
+         xml:space="preserve"
+         style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         x="232.31764"
+         y="360.18582"
+         id="text3013-3-3-7"
+         sodipodi:linespacing="125%"><tspan
+           sodipodi:role="line"
+           id="tspan3015-6-6-5"
+           x="232.31764"
+           y="360.18582"
+           style="font-size:10px">D</tspan></text>
+    </g>
+    <text
+       xml:space="preserve"
+       style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       x="275.59558"
+       y="291.95297"
+       id="text3013-36"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan3015-7"
+         x="275.59558"
+         y="291.95297"
+         style="font-size:10px">:4</tspan></text>
+    <text
+       xml:space="preserve"
+       style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       x="361.97092"
+       y="291.88705"
+       id="text3013-3-36"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan3015-6-7"
+         x="361.97092"
+         y="291.88705"
+         style="font-size:10px">B</tspan></text>
+    <text
+       xml:space="preserve"
+       style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       x="405.40396"
+       y="291.88705"
+       id="text3013-3-36-3"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan3015-6-7-6"
+         x="405.40396"
+         y="291.88705"
+         style="font-size:10px">F</tspan></text>
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/Funnel6.svg b/Documentation/RCU/Design/Expedited-Grace-Periods/Funnel6.svg
new file mode 100644 (file)
index 0000000..fbd2c18
--- /dev/null
@@ -0,0 +1,335 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="490.05093"
+   height="125.78741"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="Funnel6.svg">
+  <defs
+     id="defs4">
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend"
+       style="overflow:visible">
+      <path
+         id="path3792"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lstart"
+       style="overflow:visible">
+      <path
+         id="path3789"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(1.1,0,0,1.1,1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lstart-4"
+       style="overflow:visible">
+      <path
+         id="path3789-9"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(1.1,0,0,1.1,1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-4"
+       style="overflow:visible">
+      <path
+         id="path3792-4"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="1.3670394"
+     inkscape:cx="114.01552"
+     inkscape:cy="-86.548414"
+     inkscape:document-units="px"
+     inkscape:current-layer="layer1"
+     showgrid="false"
+     inkscape:window-width="1351"
+     inkscape:window-height="836"
+     inkscape:window-x="68"
+     inkscape:window-y="180"
+     inkscape:window-maximized="0"
+     fit-margin-top="5"
+     fit-margin-left="5"
+     fit-margin-right="5"
+     fit-margin-bottom="5" />
+  <metadata
+     id="metadata7">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title />
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-117.08462,-249.92053)">
+    <flowRoot
+       xml:space="preserve"
+       id="flowRoot2985"
+       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Symbol;-inkscape-font-specification:Symbol"><flowRegion
+         id="flowRegion2987"><rect
+           id="rect2989"
+           width="82.85714"
+           height="11.428572"
+           x="240"
+           y="492.36218" /></flowRegion><flowPara
+         id="flowPara2991" /></flowRoot>    <text
+       xml:space="preserve"
+       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Symbol;-inkscape-font-specification:Symbol"
+       x="362.371"
+       y="262.51819"
+       id="text4441"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan4443"
+         x="362.371"
+         y="262.51819">-&gt;expedited_sequence: 2  GP: E  Wakeup: A</tspan></text>
+    <rect
+       style="fill:none;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+       id="rect3101"
+       width="43.158947"
+       height="26.33428"
+       x="253.55223"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3"
+       width="43.158947"
+       height="26.33428"
+       x="297.04141"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3-6"
+       width="43.158947"
+       height="26.33428"
+       x="427.509"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3-6-7"
+       width="43.158947"
+       height="26.33428"
+       x="384.01981"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3-6-7-5"
+       width="43.158947"
+       height="26.33428"
+       x="340.53061"
+       y="275.07489" />
+    <g
+       id="g3997"
+       transform="translate(-0.87295532,0)">
+      <rect
+         y="343.37366"
+         x="123.95757"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-35"
+         style="fill:none;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="167.44673"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-62"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="297.91437"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-9"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="254.42516"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-1"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="210.93593"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-5-2"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <text
+         xml:space="preserve"
+         style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         x="146.00092"
+         y="360.25174"
+         id="text3013"
+         sodipodi:linespacing="125%"><tspan
+           sodipodi:role="line"
+           id="tspan3015"
+           x="146.00092"
+           y="360.25174"
+           style="font-size:10px">:4</tspan></text>
+      <text
+         xml:space="preserve"
+         style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         x="232.51051"
+         y="360.18094"
+         id="text3013-3-3"
+         sodipodi:linespacing="125%"><tspan
+           sodipodi:role="line"
+           id="tspan3015-6-6"
+           x="232.51051"
+           y="360.18094"
+           style="font-size:10px">C</tspan></text>
+    </g>
+    <g
+       id="g3019"
+       transform="translate(260.06223,0)">
+      <rect
+         y="343.37366"
+         x="123.95757"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-35-0"
+         style="fill:none;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="167.44673"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-62-9"
+         style="fill:#ff8282;fill-opacity:1;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="297.91437"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-9-3"
+         style="fill:#ff8282;fill-opacity:1;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="254.42516"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-1-6"
+         style="fill:#ff8282;fill-opacity:1;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="210.93593"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-5-2-0"
+         style="fill:#ff8282;fill-opacity:1;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <text
+         xml:space="preserve"
+         style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         x="145.54926"
+         y="360.25174"
+         id="text3013-3"
+         sodipodi:linespacing="125%"><tspan
+           sodipodi:role="line"
+           id="tspan3015-6"
+           x="145.54926"
+           y="360.25174"
+           style="font-size:10px">:4</tspan></text>
+      <text
+         xml:space="preserve"
+         style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         x="232.31764"
+         y="360.18582"
+         id="text3013-3-3-7"
+         sodipodi:linespacing="125%"><tspan
+           sodipodi:role="line"
+           id="tspan3015-6-6-5"
+           x="232.31764"
+           y="360.18582"
+           style="font-size:10px">D</tspan></text>
+    </g>
+    <text
+       xml:space="preserve"
+       style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       x="275.59558"
+       y="291.95297"
+       id="text3013-36"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan3015-7"
+         x="275.59558"
+         y="291.95297"
+         style="font-size:10px">:4</tspan></text>
+    <text
+       xml:space="preserve"
+       style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       x="361.97092"
+       y="291.88705"
+       id="text3013-3-36"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan3015-6-7"
+         x="361.97092"
+         y="291.88705"
+         style="font-size:10px">B</tspan></text>
+    <text
+       xml:space="preserve"
+       style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       x="405.40396"
+       y="291.88705"
+       id="text3013-3-36-3"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan3015-6-7-6"
+         x="405.40396"
+         y="291.88705"
+         style="font-size:10px">F</tspan></text>
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/Funnel7.svg b/Documentation/RCU/Design/Expedited-Grace-Periods/Funnel7.svg
new file mode 100644 (file)
index 0000000..502e159
--- /dev/null
@@ -0,0 +1,347 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="490.05093"
+   height="125.78741"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="Funnel7.svg">
+  <defs
+     id="defs4">
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend"
+       style="overflow:visible">
+      <path
+         id="path3792"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lstart"
+       style="overflow:visible">
+      <path
+         id="path3789"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(1.1,0,0,1.1,1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lstart-4"
+       style="overflow:visible">
+      <path
+         id="path3789-9"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(1.1,0,0,1.1,1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-4"
+       style="overflow:visible">
+      <path
+         id="path3792-4"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="1.3670394"
+     inkscape:cx="114.01552"
+     inkscape:cy="-86.548414"
+     inkscape:document-units="px"
+     inkscape:current-layer="layer1"
+     showgrid="false"
+     inkscape:window-width="1351"
+     inkscape:window-height="836"
+     inkscape:window-x="68"
+     inkscape:window-y="180"
+     inkscape:window-maximized="0"
+     fit-margin-top="5"
+     fit-margin-left="5"
+     fit-margin-right="5"
+     fit-margin-bottom="5" />
+  <metadata
+     id="metadata7">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title />
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-117.08462,-249.92053)">
+    <flowRoot
+       xml:space="preserve"
+       id="flowRoot2985"
+       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Symbol;-inkscape-font-specification:Symbol"><flowRegion
+         id="flowRegion2987"><rect
+           id="rect2989"
+           width="82.85714"
+           height="11.428572"
+           x="240"
+           y="492.36218" /></flowRegion><flowPara
+         id="flowPara2991" /></flowRoot>    <text
+       xml:space="preserve"
+       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Symbol;-inkscape-font-specification:Symbol"
+       x="362.371"
+       y="262.51819"
+       id="text4441"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan4443"
+         x="362.371"
+         y="262.51819">-&gt;expedited_sequence: 3  GP: E,H  Wakeup: A</tspan></text>
+    <rect
+       style="fill:none;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+       id="rect3101"
+       width="43.158947"
+       height="26.33428"
+       x="253.55223"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3"
+       width="43.158947"
+       height="26.33428"
+       x="297.04141"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;fill-opacity:1;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+       id="rect3101-3-6"
+       width="43.158947"
+       height="26.33428"
+       x="427.509"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3-6-7"
+       width="43.158947"
+       height="26.33428"
+       x="384.01981"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3-6-7-5"
+       width="43.158947"
+       height="26.33428"
+       x="340.53061"
+       y="275.07489" />
+    <g
+       id="g3997"
+       transform="translate(-0.87295532,0)">
+      <rect
+         y="343.37366"
+         x="123.95757"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-35"
+         style="fill:none;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="167.44673"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-62"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="297.91437"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-9"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="254.42516"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-1"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="210.93593"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-5-2"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <text
+         xml:space="preserve"
+         style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         x="146.00092"
+         y="360.25174"
+         id="text3013"
+         sodipodi:linespacing="125%"><tspan
+           sodipodi:role="line"
+           id="tspan3015"
+           x="146.00092"
+           y="360.25174"
+           style="font-size:10px">:4</tspan></text>
+      <text
+         xml:space="preserve"
+         style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         x="232.51051"
+         y="360.18094"
+         id="text3013-3-3"
+         sodipodi:linespacing="125%"><tspan
+           sodipodi:role="line"
+           id="tspan3015-6-6"
+           x="232.51051"
+           y="360.18094"
+           style="font-size:10px">C</tspan></text>
+    </g>
+    <g
+       id="g3019"
+       transform="translate(260.06223,0)">
+      <rect
+         y="343.37366"
+         x="123.95757"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-35-0"
+         style="fill:none;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="167.44673"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-62-9"
+         style="fill:#ff8282;fill-opacity:1;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="297.91437"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-9-3"
+         style="fill:#ff8282;fill-opacity:1;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="254.42516"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-1-6"
+         style="fill:#ff8282;fill-opacity:1;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="210.93593"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-5-2-0"
+         style="fill:#ff8282;fill-opacity:1;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <text
+         xml:space="preserve"
+         style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         x="145.54926"
+         y="360.25174"
+         id="text3013-3"
+         sodipodi:linespacing="125%"><tspan
+           sodipodi:role="line"
+           id="tspan3015-6"
+           x="145.54926"
+           y="360.25174"
+           style="font-size:10px">:6</tspan></text>
+      <text
+         xml:space="preserve"
+         style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         x="232.31764"
+         y="360.18582"
+         id="text3013-3-3-7"
+         sodipodi:linespacing="125%"><tspan
+           sodipodi:role="line"
+           id="tspan3015-6-6-5"
+           x="232.31764"
+           y="360.18582"
+           style="font-size:10px">D</tspan></text>
+    </g>
+    <text
+       xml:space="preserve"
+       style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       x="275.59558"
+       y="291.95297"
+       id="text3013-36"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan3015-7"
+         x="275.59558"
+         y="291.95297"
+         style="font-size:10px">:6</tspan></text>
+    <text
+       xml:space="preserve"
+       style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       x="361.97092"
+       y="291.88705"
+       id="text3013-3-36"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan3015-6-7"
+         x="361.97092"
+         y="291.88705"
+         style="font-size:10px">B</tspan></text>
+    <text
+       xml:space="preserve"
+       style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       x="405.40396"
+       y="291.88705"
+       id="text3013-3-36-3"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan3015-6-7-6"
+         x="405.40396"
+         y="291.88705"
+         style="font-size:10px">F</tspan></text>
+    <text
+       xml:space="preserve"
+       style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       x="449.22031"
+       y="291.88217"
+       id="text3013-3-36-3-3"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan3015-6-7-6-6"
+         x="449.22031"
+         y="291.88217"
+         style="font-size:10px">G</tspan></text>
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/Funnel8.svg b/Documentation/RCU/Design/Expedited-Grace-Periods/Funnel8.svg
new file mode 100644 (file)
index 0000000..6774015
--- /dev/null
@@ -0,0 +1,311 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="490.05093"
+   height="125.78741"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="Funnel8.svg">
+  <defs
+     id="defs4">
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend"
+       style="overflow:visible">
+      <path
+         id="path3792"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lstart"
+       style="overflow:visible">
+      <path
+         id="path3789"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(1.1,0,0,1.1,1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lstart-4"
+       style="overflow:visible">
+      <path
+         id="path3789-9"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(1.1,0,0,1.1,1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend-4"
+       style="overflow:visible">
+      <path
+         id="path3792-4"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="1.3670394"
+     inkscape:cx="114.01552"
+     inkscape:cy="-86.548414"
+     inkscape:document-units="px"
+     inkscape:current-layer="layer1"
+     showgrid="false"
+     inkscape:window-width="1351"
+     inkscape:window-height="836"
+     inkscape:window-x="68"
+     inkscape:window-y="180"
+     inkscape:window-maximized="0"
+     fit-margin-top="5"
+     fit-margin-left="5"
+     fit-margin-right="5"
+     fit-margin-bottom="5" />
+  <metadata
+     id="metadata7">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title />
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-117.08462,-249.92053)">
+    <flowRoot
+       xml:space="preserve"
+       id="flowRoot2985"
+       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Symbol;-inkscape-font-specification:Symbol"><flowRegion
+         id="flowRegion2987"><rect
+           id="rect2989"
+           width="82.85714"
+           height="11.428572"
+           x="240"
+           y="492.36218" /></flowRegion><flowPara
+         id="flowPara2991" /></flowRoot>    <text
+       xml:space="preserve"
+       style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Symbol;-inkscape-font-specification:Symbol"
+       x="362.371"
+       y="262.51819"
+       id="text4441"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan4443"
+         x="362.371"
+         y="262.51819">-&gt;expedited_sequence: 3  GP: E,H</tspan></text>
+    <rect
+       style="fill:none;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+       id="rect3101"
+       width="43.158947"
+       height="26.33428"
+       x="253.55223"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3"
+       width="43.158947"
+       height="26.33428"
+       x="297.04141"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;fill-opacity:1;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+       id="rect3101-3-6"
+       width="43.158947"
+       height="26.33428"
+       x="427.509"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3-6-7"
+       width="43.158947"
+       height="26.33428"
+       x="384.01981"
+       y="275.07489" />
+    <rect
+       style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1"
+       id="rect3101-3-6-7-5"
+       width="43.158947"
+       height="26.33428"
+       x="340.53061"
+       y="275.07489" />
+    <g
+       id="g3997"
+       transform="translate(-0.87295532,0)">
+      <rect
+         y="343.37366"
+         x="123.95757"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-35"
+         style="fill:none;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="167.44673"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-62"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="297.91437"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-9"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="254.42516"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-1"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <rect
+         y="343.37366"
+         x="210.93593"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-5-2"
+         style="fill:#ff8282;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;fill-opacity:1" />
+      <text
+         xml:space="preserve"
+         style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         x="146.00092"
+         y="360.25174"
+         id="text3013"
+         sodipodi:linespacing="125%"><tspan
+           sodipodi:role="line"
+           id="tspan3015"
+           x="146.00092"
+           y="360.25174"
+           style="font-size:10px">:4</tspan></text>
+    </g>
+    <g
+       id="g3019"
+       transform="translate(260.06223,0)">
+      <rect
+         y="343.37366"
+         x="123.95757"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-35-0"
+         style="fill:none;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="167.44673"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-62-9"
+         style="fill:#ff8282;fill-opacity:1;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="297.91437"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-9-3"
+         style="fill:#ff8282;fill-opacity:1;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="254.42516"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-1-6"
+         style="fill:#ff8282;fill-opacity:1;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <rect
+         y="343.37366"
+         x="210.93593"
+         height="26.33428"
+         width="43.158947"
+         id="rect3101-3-6-7-5-2-0"
+         style="fill:#ff8282;fill-opacity:1;stroke:#000000;stroke-width:2;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <text
+         xml:space="preserve"
+         style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         x="145.54926"
+         y="360.25174"
+         id="text3013-3"
+         sodipodi:linespacing="125%"><tspan
+           sodipodi:role="line"
+           id="tspan3015-6"
+           x="145.54926"
+           y="360.25174"
+           style="font-size:10px">:6</tspan></text>
+    </g>
+    <text
+       xml:space="preserve"
+       style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       x="275.59558"
+       y="291.95297"
+       id="text3013-36"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan3015-7"
+         x="275.59558"
+         y="291.95297"
+         style="font-size:10px">:6</tspan></text>
+    <text
+       xml:space="preserve"
+       style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       x="405.40396"
+       y="291.88705"
+       id="text3013-3-36-3"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan3015-6-7-6"
+         x="405.40396"
+         y="291.88705"
+         style="font-size:10px">F</tspan></text>
+    <text
+       xml:space="preserve"
+       style="font-size:20px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       x="449.22031"
+       y="291.88217"
+       id="text3013-3-36-3-3"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan3015-6-7-6-6"
+         x="449.22031"
+         y="291.88217"
+         style="font-size:10px">G</tspan></text>
+  </g>
+</svg>
index 39bcb74ea733c920d930a3a1191969a8de99a56a..21593496aca6f617957b667f4f3aa5d627e86c4d 100644 (file)
@@ -1480,7 +1480,7 @@ speed-of-light delays if nothing else.
 
 <p>
 Furthermore, uncertainty about external state is inherent in many cases.
-For example, a pair of veternarians might use heartbeat to determine
+For example, a pair of veterinarians might use heartbeat to determine
 whether or not a given cat was alive.
 But how long should they wait after the last heartbeat to decide that
 the cat is in fact dead?
@@ -1489,9 +1489,9 @@ mean that a relaxed cat would be considered to cycle between death
 and life more than 100 times per minute.
 Moreover, just as with human beings, a cat's heart might stop for
 some period of time, so the exact wait period is a judgment call.
-One of our pair of veternarians might wait 30 seconds before pronouncing
+One of our pair of veterinarians might wait 30 seconds before pronouncing
 the cat dead, while the other might insist on waiting a full minute.
-The two veternarians would then disagree on the state of the cat during
+The two veterinarians would then disagree on the state of the cat during
 the final 30 seconds of the minute following the last heartbeat.
 
 <p>
@@ -1945,7 +1945,7 @@ guard against mishaps and misuse:
 <ol>
 <li>   It is all too easy to forget to use <tt>rcu_read_lock()</tt>
        everywhere that it is needed, so kernels built with
-       <tt>CONFIG_PROVE_RCU=y</tt> will spat if
+       <tt>CONFIG_PROVE_RCU=y</tt> will splat if
        <tt>rcu_dereference()</tt> is used outside of an
        RCU read-side critical section.
        Update-side code can use <tt>rcu_dereference_protected()</tt>,
@@ -2421,7 +2421,7 @@ However, there are some restrictions on the code placed within
 <li>   Blocking is prohibited.
        In practice, this is not a serious restriction given that idle
        tasks are prohibited from blocking to begin with.
-<li>   Although nesting <tt>RCU_NONIDLE()</tt> is permited, they cannot
+<li>   Although nesting <tt>RCU_NONIDLE()</tt> is permitted, they cannot
        nest indefinitely deeply.
        However, given that they can be nested on the order of a million
        deep, even on 32-bit systems, this should not be a serious
@@ -2885,7 +2885,7 @@ APIs for defining and initializing <tt>srcu_struct</tt> structures.
 <h3><a name="Tasks RCU">Tasks RCU</a></h3>
 
 <p>
-Some forms of tracing use &ldquo;tramopolines&rdquo; to handle the
+Some forms of tracing use &ldquo;trampolines&rdquo; to handle the
 binary rewriting required to install different types of probes.
 It would be good to be able to free old trampolines, which sounds
 like a job for some form of RCU.
index 00a3a38b375ae9946425fc2ea94fa0c2383e867c..6549012033f9d369cbb39c165c45d2d6c65f4154 100644 (file)
@@ -237,7 +237,7 @@ o   "ktl" is the low-order 16 bits (in hexadecimal) of the count of
 
 The output of "cat rcu/rcu_preempt/rcuexp" looks as follows:
 
-s=21872 wd1=0 wd2=0 wd3=5 n=0 enq=0 sc=21872
+s=21872 wd1=0 wd2=0 wd3=5 enq=0 sc=21872
 
 These fields are as follows:
 
@@ -249,9 +249,6 @@ o   "wd1", "wd2", and "wd3" are the number of times that an attempt
        completed an expedited grace period that satisfies the attempted
        request.  "Our work is done."
 
-o      "n" is number of times that a concurrent CPU-hotplug operation
-       forced a fallback to a normal grace period.
-
 o      "enq" is the number of quiescent states still outstanding.
 
 o      "sc" is the number of times that the attempt to start a
index be7c0d9506b12072219f0396ceb9072eeea03df8..635d11135090cd7fc604c2c3496359a5b7a50f73 100644 (file)
                        loops can be debugged more effectively on production
                        systems.
 
-       clocksource.arm_arch_timer.fsl-a008585=
-                       [ARM64]
-                       Format: <bool>
-                       Enable/disable the workaround of Freescale/NXP
-                       erratum A-008585.  This can be useful for KVM
-                       guests, if the guest device tree doesn't show the
-                       erratum.  If unspecified, the workaround is
-                       enabled based on the device tree.
-
        clearcpuid=BITNUM [X86]
                        Disable CPUID feature X for the kernel. See
                        arch/x86/include/asm/cpufeatures.h for the valid bit
                        Lazy RCU callbacks are those which RCU can
                        prove do nothing more than free memory.
 
+       rcutree.rcu_kick_kthreads= [KNL]
+                       Cause the grace-period kthread to get an extra
+                       wake_up() if it sleeps three times longer than
+                       it should at force-quiescent-state time.
+                       This wake_up() will be accompanied by a
+                       WARN_ONCE() splat and an ftrace_dump().
+
        rcuperf.gp_exp= [KNL]
                        Measure performance of expedited synchronous
                        grace-period primitives.
        rhash_entries=  [KNL,NET]
                        Set number of hash buckets for route cache
 
+       ring3mwait=disable
+                       [KNL] Disable ring 3 MONITOR/MWAIT feature on supported
+                       CPUs.
+
        ro              [KNL] Mount root device read-only on boot
 
        rodata=         [KNL]
index d71340e86c27590ff139d309b57e37d755c8dfd7..9939348bd4a3dfae23dc2e3cd4b697dd0c8151dc 100644 (file)
@@ -438,11 +438,13 @@ A typical EDAC system has the following structure under
        │   │   ├── ce_count
        │   │   ├── ce_noinfo_count
        │   │   ├── dimm0
+       │   │   │   ├── dimm_ce_count
        │   │   │   ├── dimm_dev_type
        │   │   │   ├── dimm_edac_mode
        │   │   │   ├── dimm_label
        │   │   │   ├── dimm_location
        │   │   │   ├── dimm_mem_type
+       │   │   │   ├── dimm_ue_count
        │   │   │   ├── size
        │   │   │   └── uevent
        │   │   ├── max_location
@@ -457,11 +459,13 @@ A typical EDAC system has the following structure under
        │   │   ├── ce_count
        │   │   ├── ce_noinfo_count
        │   │   ├── dimm0
+       │   │   │   ├── dimm_ce_count
        │   │   │   ├── dimm_dev_type
        │   │   │   ├── dimm_edac_mode
        │   │   │   ├── dimm_label
        │   │   │   ├── dimm_location
        │   │   │   ├── dimm_mem_type
+       │   │   │   ├── dimm_ue_count
        │   │   │   ├── size
        │   │   │   └── uevent
        │   │   ├── max_location
@@ -483,6 +487,22 @@ this ``X`` memory module:
        This attribute file displays, in count of megabytes, the memory
        that this csrow contains.
 
+- ``dimm_ue_count`` - Uncorrectable Errors count attribute file
+
+       This attribute file displays the total count of uncorrectable
+       errors that have occurred on this DIMM. If panic_on_ue is set
+       this counter will not have a chance to increment, since EDAC
+       will panic the system.
+
+- ``dimm_ce_count`` - Correctable Errors count attribute file
+
+       This attribute file displays the total count of correctable
+       errors that have occurred on this DIMM. This count is very
+       important to examine. CEs provide early indications that a
+       DIMM is beginning to fail. This count field should be
+       monitored for non-zero values and report such information
+       to the system administrator.
+
 - ``dimm_dev_type``  - Device type attribute file
 
        This attribute file will display what type of DRAM device is
index 4bc7287806de461823b2a3dedeb84322ad522a80..978463a7c81ea59ea24f2e967f9683321ef1188e 100644 (file)
@@ -8,6 +8,8 @@
 
                    Dominik Brodowski  <linux@brodo.de>
                     David Kimdon <dwhedon@debian.org>
+               Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+                  Viresh Kumar <viresh.kumar@linaro.org>
 
 
 
@@ -36,10 +38,11 @@ speed limits (like LCD drivers on ARM architecture). Additionally, the
 kernel "constant" loops_per_jiffy is updated on frequency changes
 here.
 
-Reference counting is done by cpufreq_get_cpu and cpufreq_put_cpu,
-which make sure that the cpufreq processor driver is correctly
-registered with the core, and will not be unloaded until
-cpufreq_put_cpu is called.
+Reference counting of the cpufreq policies is done by cpufreq_cpu_get
+and cpufreq_cpu_put, which make sure that the cpufreq driver is
+correctly registered with the core, and will not be unloaded until
+cpufreq_cpu_put is called. That also ensures that the respective cpufreq
+policy doesn't get freed while being used.
 
 2. CPUFreq notifiers
 ====================
@@ -69,18 +72,16 @@ CPUFreq policy notifier is called twice for a policy transition:
 The phase is specified in the second argument to the notifier.
 
 The third argument, a void *pointer, points to a struct cpufreq_policy
-consisting of five values: cpu, min, max, policy and max_cpu_freq. min 
-and max are the lower and upper frequencies (in kHz) of the new
-policy, policy the new policy, cpu the number of the affected CPU; and 
-max_cpu_freq the maximum supported CPU frequency. This value is given 
-for informational purposes only.
+consisting of several values, including min, max (the lower and upper
+frequencies (in kHz) of the new policy).
 
 
 2.2 CPUFreq transition notifiers
 --------------------------------
 
-These are notified twice when the CPUfreq driver switches the CPU core
-frequency and this change has any external implications.
+These are notified twice for each online CPU in the policy, when the
+CPUfreq driver switches the CPU core frequency and this change has
+any external implications.
 
 The second argument specifies the phase - CPUFREQ_PRECHANGE or
 CPUFREQ_POSTCHANGE.
@@ -90,6 +91,7 @@ values:
 cpu    - number of the affected CPU
 old    - old frequency
 new    - new frequency
+flags  - flags of the cpufreq driver
 
 3. CPUFreq Table Generation with Operating Performance Point (OPP)
 ==================================================================
index 772b94fde2640a956db760fe8408af82d6ab1450..f71e6be26b83b48466353a58e5fc45ac75c239b8 100644 (file)
@@ -9,6 +9,8 @@
 
 
                    Dominik Brodowski  <linux@brodo.de>
+               Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+                  Viresh Kumar <viresh.kumar@linaro.org>
 
 
 
@@ -49,49 +51,65 @@ using cpufreq_register_driver()
 
 What shall this struct cpufreq_driver contain? 
 
-cpufreq_driver.name -          The name of this driver.
+ .name - The name of this driver.
 
-cpufreq_driver.init -          A pointer to the per-CPU initialization 
-                               function.
+ .init - A pointer to the per-policy initialization function.
 
-cpufreq_driver.verify -                A pointer to a "verification" function.
+ .verify - A pointer to a "verification" function.
 
-cpufreq_driver.setpolicy _or_ 
-cpufreq_driver.target/
-target_index           -       See below on the differences.
+ .setpolicy _or_ .fast_switch _or_ .target _or_ .target_index - See
+ below on the differences.
 
 And optionally
 
-cpufreq_driver.exit -          A pointer to a per-CPU cleanup
-                               function called during CPU_POST_DEAD
-                               phase of cpu hotplug process.
+ .flags - Hints for the cpufreq core.
 
-cpufreq_driver.stop_cpu -      A pointer to a per-CPU stop function
-                               called during CPU_DOWN_PREPARE phase of
-                               cpu hotplug process.
+ .driver_data - cpufreq driver specific data.
 
-cpufreq_driver.resume -                A pointer to a per-CPU resume function
-                               which is called with interrupts disabled
-                               and _before_ the pre-suspend frequency
-                               and/or policy is restored by a call to
-                               ->target/target_index or ->setpolicy.
+ .resolve_freq - Returns the most appropriate frequency for a target
+ frequency. Doesn't change the frequency though.
 
-cpufreq_driver.attr -          A pointer to a NULL-terminated list of
-                               "struct freq_attr" which allow to
-                               export values to sysfs.
+ .get_intermediate and target_intermediate - Used to switch to stable
+ frequency while changing CPU frequency.
 
-cpufreq_driver.get_intermediate
-and target_intermediate                Used to switch to stable frequency while
-                               changing CPU frequency.
+ .get - Returns current frequency of the CPU.
+
+ .bios_limit - Returns HW/BIOS max frequency limitations for the CPU.
+
+ .exit - A pointer to a per-policy cleanup function called during
+ CPU_POST_DEAD phase of cpu hotplug process.
+
+ .stop_cpu - A pointer to a per-policy stop function called during
+ CPU_DOWN_PREPARE phase of cpu hotplug process.
+
+ .suspend - A pointer to a per-policy suspend function which is called
+ with interrupts disabled and _after_ the governor is stopped for the
+ policy.
+
+ .resume - A pointer to a per-policy resume function which is called
+ with interrupts disabled and _before_ the governor is started again.
+
+ .ready - A pointer to a per-policy ready function which is called after
+ the policy is fully initialized.
+
+ .attr - A pointer to a NULL-terminated list of "struct freq_attr" which
+ allow to export values to sysfs.
+
+ .boost_enabled - If set, boost frequencies are enabled.
+
+ .set_boost - A pointer to a per-policy function to enable/disable boost
+ frequencies.
 
 
 1.2 Per-CPU Initialization
 --------------------------
 
 Whenever a new CPU is registered with the device model, or after the
-cpufreq driver registers itself, the per-CPU initialization function 
-cpufreq_driver.init is called. It takes a struct cpufreq_policy
-*policy as argument. What to do now?
+cpufreq driver registers itself, the per-policy initialization function
+cpufreq_driver.init is called if no cpufreq policy existed for the CPU.
+Note that the .init() and .exit() routines are called only once for the
+policy and not for each CPU managed by the policy. It takes a struct
+cpufreq_policy *policy as argument. What to do now?
 
 If necessary, activate the CPUfreq support on your CPU.
 
@@ -117,47 +135,45 @@ policy->governor          must contain the "default policy" for
                                cpufreq_driver.setpolicy or
                                cpufreq_driver.target/target_index is called
                                with these values.
+policy->cpus                   Update this with the masks of the
+                               (online + offline) CPUs that do DVFS
+                               along with this CPU (i.e.  that share
+                               clock/voltage rails with it).
 
 For setting some of these values (cpuinfo.min[max]_freq, policy->min[max]), the
 frequency table helpers might be helpful. See the section 2 for more information
 on them.
 
-SMP systems normally have same clock source for a group of cpus. For these the
-.init() would be called only once for the first online cpu. Here the .init()
-routine must initialize policy->cpus with mask of all possible cpus (Online +
-Offline) that share the clock. Then the core would copy this mask onto
-policy->related_cpus and will reset policy->cpus to carry only online cpus.
-
 
 1.3 verify
-------------
+----------
 
 When the user decides a new policy (consisting of
 "policy,governor,min,max") shall be set, this policy must be validated
 so that incompatible values can be corrected. For verifying these
-values, a frequency table helper and/or the
-cpufreq_verify_within_limits(struct cpufreq_policy *policy, unsigned
-int min_freq, unsigned int max_freq) function might be helpful. See
-section 2 for details on frequency table helpers.
+values cpufreq_verify_within_limits(struct cpufreq_policy *policy,
+unsigned int min_freq, unsigned int max_freq) function might be helpful.
+See section 2 for details on frequency table helpers.
 
 You need to make sure that at least one valid frequency (or operating
 range) is within policy->min and policy->max. If necessary, increase
 policy->max first, and only if this is no solution, decrease policy->min.
 
 
-1.4 target/target_index or setpolicy?
-----------------------------
+1.4 target or target_index or setpolicy or fast_switch?
+-------------------------------------------------------
 
 Most cpufreq drivers or even most cpu frequency scaling algorithms 
-only allow the CPU to be set to one frequency. For these, you use the
-->target/target_index call.
+only allow the CPU frequency to be set to predefined fixed values. For
+these, you use the ->target(), ->target_index() or ->fast_switch()
+callbacks.
 
-Some cpufreq-capable processors switch the frequency between certain
-limits on their own. These shall use the ->setpolicy call
+Some cpufreq capable processors switch the frequency between certain
+limits on their own. These shall use the ->setpolicy() callback.
 
 
 1.5. target/target_index
--------------
+------------------------
 
 The target_index call has two arguments: struct cpufreq_policy *policy,
 and unsigned int index (into the exposed frequency table).
@@ -186,9 +202,20 @@ actual frequency must be determined using the following rules:
 Here again the frequency table helper might assist you - see section 2
 for details.
 
+1.6. fast_switch
+----------------
 
-1.6 setpolicy
----------------
+This function is used for frequency switching from scheduler's context.
+Not all drivers are expected to implement it, as sleeping from within
+this callback isn't allowed. This callback must be highly optimized to
+do switching as fast as possible.
+
+This function has two arguments: struct cpufreq_policy *policy and
+unsigned int target_frequency.
+
+
+1.7 setpolicy
+-------------
 
 The setpolicy call only takes a struct cpufreq_policy *policy as
 argument. You need to set the lower limit of the in-processor or
@@ -198,7 +225,7 @@ setting when policy->policy is CPUFREQ_POLICY_PERFORMANCE, and a
 powersaving-oriented setting when CPUFREQ_POLICY_POWERSAVE. Also check
 the reference implementation in drivers/cpufreq/longrun.c
 
-1.7 get_intermediate and target_intermediate
+1.8 get_intermediate and target_intermediate
 --------------------------------------------
 
 Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION unset.
@@ -222,42 +249,36 @@ failures as core would send notifications for that.
 
 As most cpufreq processors only allow for being set to a few specific
 frequencies, a "frequency table" with some functions might assist in
-some work of the processor driver. Such a "frequency table" consists
-of an array of struct cpufreq_frequency_table entries, with any value in
-"driver_data" you want to use, and the corresponding frequency in
-"frequency". At the end of the table, you need to add a
-cpufreq_frequency_table entry with frequency set to CPUFREQ_TABLE_END. And
-if you want to skip one entry in the table, set the frequency to 
-CPUFREQ_ENTRY_INVALID. The entries don't need to be in ascending
-order.
-
-By calling cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
-                                       struct cpufreq_frequency_table *table);
-the cpuinfo.min_freq and cpuinfo.max_freq values are detected, and
-policy->min and policy->max are set to the same values. This is
-helpful for the per-CPU initialization stage.
-
-int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
-                                   struct cpufreq_frequency_table *table);
-assures that at least one valid frequency is within policy->min and
-policy->max, and all other criteria are met. This is helpful for the
-->verify call.
-
-int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
-                                   unsigned int target_freq,
-                                   unsigned int relation);
-
-is the corresponding frequency table helper for the ->target
-stage. Just pass the values to this function, and this function
-returns the number of the frequency table entry which contains
-the frequency the CPU shall be set to.
+some work of the processor driver. Such a "frequency table" consists of
+an array of struct cpufreq_frequency_table entries, with driver specific
+values in "driver_data", the corresponding frequency in "frequency" and
+flags set. At the end of the table, you need to add a
+cpufreq_frequency_table entry with frequency set to CPUFREQ_TABLE_END.
+And if you want to skip one entry in the table, set the frequency to
+CPUFREQ_ENTRY_INVALID. The entries don't need to be sorted in any
+particular order, but if they are, the cpufreq core will do DVFS a bit
+more quickly for them as the search for the best match is faster.
+
+By calling cpufreq_table_validate_and_show(), the cpuinfo.min_freq and
+cpuinfo.max_freq values are detected, and policy->min and policy->max
+are set to the same values. This is helpful for the per-CPU
+initialization stage.
+
+cpufreq_frequency_table_verify() assures that at least one valid
+frequency is within policy->min and policy->max, and all other criteria
+are met. This is helpful for the ->verify call.
+
+cpufreq_frequency_table_target() is the corresponding frequency table
+helper for the ->target stage. Just pass the values to this function,
+and this function returns the number of the frequency table entry which
+contains the frequency the CPU shall be set to.
 
 The following macros can be used as iterators over cpufreq_frequency_table:
 
 cpufreq_for_each_entry(pos, table) - iterates over all entries of frequency
 table.
 
-cpufreq-for_each_valid_entry(pos, table) - iterates over all entries,
+cpufreq_for_each_valid_entry(pos, table) - iterates over all entries,
 excluding CPUFREQ_ENTRY_INVALID frequencies.
 Use arguments "pos" - a cpufreq_frequency_table * as a loop cursor and
 "table" - the cpufreq_frequency_table * you want to iterate over.
index 3c355f6ad83494e6de96a99f1a39ebf7e413a670..2bbe207354ed7c7e73ceb7f51ddb105c49827a1d 100644 (file)
@@ -34,10 +34,10 @@ cpufreq stats provides following statistics (explained in detail below).
 -  total_trans
 -  trans_table
 
-All the statistics will be from the time the stats driver has been inserted 
-to the time when a read of a particular statistic is done. Obviously, stats 
-driver will not have any information about the frequency transitions before
-the stats driver insertion.
+All the statistics will be from the time the stats driver has been inserted
+(or the time the stats were reset) to the time when a read of a particular
+statistic is done. Obviously, stats driver will not have any information
+about the frequency transitions before the stats driver insertion.
 
 --------------------------------------------------------------------------------
 <mysystem>:/sys/devices/system/cpu/cpu0/cpufreq/stats # ls -l
@@ -110,25 +110,13 @@ Config Main Menu
                CPU Frequency scaling  --->
                        [*] CPU Frequency scaling
                        [*]   CPU frequency translation statistics
-                       [*]     CPU frequency translation statistics details
 
 
 "CPU Frequency scaling" (CONFIG_CPU_FREQ) should be enabled to configure
 cpufreq-stats.
 
 "CPU frequency translation statistics" (CONFIG_CPU_FREQ_STAT) provides the
-basic statistics which includes time_in_state and total_trans.
+statistics which includes time_in_state, total_trans and trans_table.
 
-"CPU frequency translation statistics details" (CONFIG_CPU_FREQ_STAT_DETAILS)
-provides fine grained cpufreq stats by trans_table. The reason for having a
-separate config option for trans_table is:
-- trans_table goes against the traditional /sysfs rule of one value per
-  interface. It provides a whole bunch of value in a 2 dimensional matrix
-  form.
-
-Once these two options are enabled and your CPU supports cpufrequency, you
+Once this option is enabled and your CPU supports cpufrequency, you
 will be able to see the CPU frequency statistics in /sysfs.
-
-
-
-
index c15aa75f52275b703e3da46faff2a95e2ccfc6e9..61b3184b6c24e47c4a31c09763707e49a1d0de01 100644 (file)
@@ -10,6 +10,8 @@
 
                    Dominik Brodowski  <linux@brodo.de>
             some additions and corrections by Nico Golde <nico@ngolde.de>
+               Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+                  Viresh Kumar <viresh.kumar@linaro.org>
 
 
 
@@ -28,32 +30,27 @@ Contents:
 2.3  Userspace
 2.4  Ondemand
 2.5  Conservative
+2.6  Schedutil
 
 3.   The Governor Interface in the CPUfreq Core
 
+4.   References
 
 
 1. What Is A CPUFreq Governor?
 ==============================
 
 Most cpufreq drivers (except the intel_pstate and longrun) or even most
-cpu frequency scaling algorithms only offer the CPU to be set to one
-frequency. In order to offer dynamic frequency scaling, the cpufreq
-core must be able to tell these drivers of a "target frequency". So
-these specific drivers will be transformed to offer a "->target/target_index"
-call instead of the existing "->setpolicy" call. For "longrun", all
-stays the same, though.
+cpu frequency scaling algorithms only allow the CPU frequency to be set
+to predefined fixed values.  In order to offer dynamic frequency
+scaling, the cpufreq core must be able to tell these drivers of a
+"target frequency". So these specific drivers will be transformed to
+offer a "->target/target_index/fast_switch()" call instead of the
+"->setpolicy()" call. For set_policy drivers, all stays the same,
+though.
 
 How to decide what frequency within the CPUfreq policy should be used?
-That's done using "cpufreq governors". Two are already in this patch
--- they're the already existing "powersave" and "performance" which
-set the frequency statically to the lowest or highest frequency,
-respectively. At least two more such governors will be ready for
-addition in the near future, but likely many more as there are various
-different theories and models about dynamic frequency scaling
-around. Using such a generic interface as cpufreq offers to scaling
-governors, these can be tested extensively, and the best one can be
-selected for each specific use.
+That's done using "cpufreq governors".
 
 Basically, it's the following flow graph:
 
@@ -71,7 +68,7 @@ CPU can be set to switch independently         |         CPU can only be set
                    /                          the limits of policy->{min,max}
                   /                                \
                  /                                  \
-       Using the ->setpolicy call,              Using the ->target/target_index call,
+       Using the ->setpolicy call,              Using the ->target/target_index/fast_switch call,
            the limits and the                    the frequency closest
             "policy" is set.                     to target_freq is set.
                                                  It is assured that it
@@ -109,114 +106,159 @@ directory.
 2.4 Ondemand
 ------------
 
-The CPUfreq governor "ondemand" sets the CPU depending on the
-current usage. To do this the CPU must have the capability to
-switch the frequency very quickly.  There are a number of sysfs file
-accessible parameters:
-
-sampling_rate: measured in uS (10^-6 seconds), this is how often you
-want the kernel to look at the CPU usage and to make decisions on
-what to do about the frequency.  Typically this is set to values of
-around '10000' or more. It's default value is (cmp. with users-guide.txt):
-transition_latency * 1000
-Be aware that transition latency is in ns and sampling_rate is in us, so you
-get the same sysfs value by default.
-Sampling rate should always get adjusted considering the transition latency
-To set the sampling rate 750 times as high as the transition latency
-in the bash (as said, 1000 is default), do:
-echo `$(($(cat cpuinfo_transition_latency) * 750 / 1000)) \
-    >ondemand/sampling_rate
-
-sampling_rate_min:
-The sampling rate is limited by the HW transition latency:
-transition_latency * 100
-Or by kernel restrictions:
-If CONFIG_NO_HZ_COMMON is set, the limit is 10ms fixed.
-If CONFIG_NO_HZ_COMMON is not set or nohz=off boot parameter is used, the
-limits depend on the CONFIG_HZ option:
-HZ=1000: min=20000us  (20ms)
-HZ=250:  min=80000us  (80ms)
-HZ=100:  min=200000us (200ms)
-The highest value of kernel and HW latency restrictions is shown and
-used as the minimum sampling rate.
-
-up_threshold: defines what the average CPU usage between the samplings
-of 'sampling_rate' needs to be for the kernel to make a decision on
-whether it should increase the frequency.  For example when it is set
-to its default value of '95' it means that between the checking
-intervals the CPU needs to be on average more than 95% in use to then
-decide that the CPU frequency needs to be increased.  
-
-ignore_nice_load: this parameter takes a value of '0' or '1'. When
-set to '0' (its default), all processes are counted towards the
-'cpu utilisation' value.  When set to '1', the processes that are
-run with a 'nice' value will not count (and thus be ignored) in the
-overall usage calculation.  This is useful if you are running a CPU
-intensive calculation on your laptop that you do not care how long it
-takes to complete as you can 'nice' it and prevent it from taking part
-in the deciding process of whether to increase your CPU frequency.
-
-sampling_down_factor: this parameter controls the rate at which the
-kernel makes a decision on when to decrease the frequency while running
-at top speed. When set to 1 (the default) decisions to reevaluate load
-are made at the same interval regardless of current clock speed. But
-when set to greater than 1 (e.g. 100) it acts as a multiplier for the
-scheduling interval for reevaluating load when the CPU is at its top
-speed due to high load. This improves performance by reducing the overhead
-of load evaluation and helping the CPU stay at its top speed when truly
-busy, rather than shifting back and forth in speed. This tunable has no
-effect on behavior at lower speeds/lower CPU loads.
-
-powersave_bias: this parameter takes a value between 0 to 1000. It
-defines the percentage (times 10) value of the target frequency that
-will be shaved off of the target. For example, when set to 100 -- 10%,
-when ondemand governor would have targeted 1000 MHz, it will target
-1000 MHz - (10% of 1000 MHz) = 900 MHz instead. This is set to 0
-(disabled) by default.
-When AMD frequency sensitivity powersave bias driver --
-drivers/cpufreq/amd_freq_sensitivity.c is loaded, this parameter
-defines the workload frequency sensitivity threshold in which a lower
-frequency is chosen instead of ondemand governor's original target.
-The frequency sensitivity is a hardware reported (on AMD Family 16h
-Processors and above) value between 0 to 100% that tells software how
-the performance of the workload running on a CPU will change when
-frequency changes. A workload with sensitivity of 0% (memory/IO-bound)
-will not perform any better on higher core frequency, whereas a
-workload with sensitivity of 100% (CPU-bound) will perform better
-higher the frequency. When the driver is loaded, this is set to 400
-by default -- for CPUs running workloads with sensitivity value below
-40%, a lower frequency is chosen. Unloading the driver or writing 0
-will disable this feature.
+The CPUfreq governor "ondemand" sets the CPU frequency depending on the
+current system load. Load estimation is triggered by the scheduler
+through the update_util_data->func hook; when triggered, cpufreq checks
+the CPU-usage statistics over the last period and the governor sets the
+CPU accordingly.  The CPU must have the capability to switch the
+frequency very quickly.
+
+Sysfs files:
+
+* sampling_rate:
+
+  Measured in uS (10^-6 seconds), this is how often you want the kernel
+  to look at the CPU usage and to make decisions on what to do about the
+  frequency.  Typically this is set to values of around '10000' or more.
+  Its default value is (cmp. with users-guide.txt): transition_latency
+  * 1000.  Be aware that transition latency is in ns and sampling_rate
+  is in us, so you get the same sysfs value by default.  Sampling rate
+  should always get adjusted considering the transition latency.  To set
+  the sampling rate 750 times as high as the transition latency in the
+  bash (as said, 1000 is default), do:
+
+  $ echo $(($(cat cpuinfo_transition_latency) * 750 / 1000)) > ondemand/sampling_rate
+
+* sampling_rate_min:
+
+  The sampling rate is limited by the HW transition latency:
+  transition_latency * 100
+
+  Or by kernel restrictions:
+  - If CONFIG_NO_HZ_COMMON is set, the limit is 10ms fixed.
+  - If CONFIG_NO_HZ_COMMON is not set or nohz=off boot parameter is
+    used, the limits depend on the CONFIG_HZ option:
+    HZ=1000: min=20000us  (20ms)
+    HZ=250:  min=80000us  (80ms)
+    HZ=100:  min=200000us (200ms)
+
+  The highest value of kernel and HW latency restrictions is shown and
+  used as the minimum sampling rate.
+
+* up_threshold:
+
+  This defines what the average CPU usage between the samplings of
+  'sampling_rate' needs to be for the kernel to make a decision on
+  whether it should increase the frequency.  For example when it is set
+  to its default value of '95' it means that between the checking
+  intervals the CPU needs to be on average more than 95% in use to then
+  decide that the CPU frequency needs to be increased.
+
+* ignore_nice_load:
+
+  This parameter takes a value of '0' or '1'. When set to '0' (its
+  default), all processes are counted towards the 'cpu utilisation'
+  value.  When set to '1', the processes that are run with a 'nice'
+  value will not count (and thus be ignored) in the overall usage
+  calculation.  This is useful if you are running a CPU intensive
+  calculation on your laptop that you do not care how long it takes to
+  complete as you can 'nice' it and prevent it from taking part in the
+  deciding process of whether to increase your CPU frequency.
+
+* sampling_down_factor:
+
+  This parameter controls the rate at which the kernel makes a decision
+  on when to decrease the frequency while running at top speed. When set
+  to 1 (the default) decisions to reevaluate load are made at the same
+  interval regardless of current clock speed. But when set to greater
+  than 1 (e.g. 100) it acts as a multiplier for the scheduling interval
+  for reevaluating load when the CPU is at its top speed due to high
+  load. This improves performance by reducing the overhead of load
+  evaluation and helping the CPU stay at its top speed when truly busy,
+  rather than shifting back and forth in speed. This tunable has no
+  effect on behavior at lower speeds/lower CPU loads.
+
+* powersave_bias:
+
+  This parameter takes a value between 0 to 1000. It defines the
+  percentage (times 10) value of the target frequency that will be
+  shaved off of the target. For example, when set to 100 -- 10%, when
+  ondemand governor would have targeted 1000 MHz, it will target
+  1000 MHz - (10% of 1000 MHz) = 900 MHz instead. This is set to 0
+  (disabled) by default.
+
+  When AMD frequency sensitivity powersave bias driver --
+  drivers/cpufreq/amd_freq_sensitivity.c is loaded, this parameter
+  defines the workload frequency sensitivity threshold in which a lower
+  frequency is chosen instead of ondemand governor's original target.
+  The frequency sensitivity is a hardware reported (on AMD Family 16h
+  Processors and above) value between 0 to 100% that tells software how
+  the performance of the workload running on a CPU will change when
+  frequency changes. A workload with sensitivity of 0% (memory/IO-bound)
+  will not perform any better on higher core frequency, whereas a
+  workload with sensitivity of 100% (CPU-bound) will perform better
+  higher the frequency. When the driver is loaded, this is set to 400 by
+  default -- for CPUs running workloads with sensitivity value below
+  40%, a lower frequency is chosen. Unloading the driver or writing 0
+  will disable this feature.
 
 
 2.5 Conservative
 ----------------
 
 The CPUfreq governor "conservative", much like the "ondemand"
-governor, sets the CPU depending on the current usage.  It differs in
-behaviour in that it gracefully increases and decreases the CPU speed
-rather than jumping to max speed the moment there is any load on the
-CPU.  This behaviour more suitable in a battery powered environment.
-The governor is tweaked in the same manner as the "ondemand" governor
-through sysfs with the addition of:
-
-freq_step: this describes what percentage steps the cpu freq should be
-increased and decreased smoothly by.  By default the cpu frequency will
-increase in 5% chunks of your maximum cpu frequency.  You can change this
-value to anywhere between 0 and 100 where '0' will effectively lock your
-CPU at a speed regardless of its load whilst '100' will, in theory, make
-it behave identically to the "ondemand" governor.
-
-down_threshold: same as the 'up_threshold' found for the "ondemand"
-governor but for the opposite direction.  For example when set to its
-default value of '20' it means that if the CPU usage needs to be below
-20% between samples to have the frequency decreased.
-
-sampling_down_factor: similar functionality as in "ondemand" governor.
-But in "conservative", it controls the rate at which the kernel makes
-a decision on when to decrease the frequency while running in any
-speed. Load for frequency increase is still evaluated every
-sampling rate.
+governor, sets the CPU frequency depending on the current usage.  It
+differs in behaviour in that it gracefully increases and decreases the
+CPU speed rather than jumping to max speed the moment there is any load
+on the CPU. This behaviour is more suitable in a battery powered
+environment.  The governor is tweaked in the same manner as the
+"ondemand" governor through sysfs with the addition of:
+
+* freq_step:
+
+  This describes what percentage steps the cpu freq should be increased
+  and decreased smoothly by.  By default the cpu frequency will increase
+  in 5% chunks of your maximum cpu frequency.  You can change this value
+  to anywhere between 0 and 100 where '0' will effectively lock your CPU
+  at a speed regardless of its load whilst '100' will, in theory, make
+  it behave identically to the "ondemand" governor.
+
+* down_threshold:
+
+  Same as the 'up_threshold' found for the "ondemand" governor but for
+  the opposite direction.  For example when set to its default value of
+  '20' it means that the CPU usage needs to be below 20% between
+  samples for the frequency to be decreased.
+
+* sampling_down_factor:
+
+  Similar functionality as in "ondemand" governor.  But in
+  "conservative", it controls the rate at which the kernel makes a
+  decision on when to decrease the frequency while running in any speed.
+  Load for frequency increase is still evaluated every sampling rate.
+
+
+2.6 Schedutil
+-------------
+
+The "schedutil" governor aims at better integration with the Linux
+kernel scheduler.  Load estimation is achieved through the scheduler's
+Per-Entity Load Tracking (PELT) mechanism, which also provides
+information about the recent load [1].  This governor currently does
+load based DVFS only for tasks managed by CFS. RT and DL scheduler tasks
+are always run at the highest frequency.  Unlike all the other
+governors, the code is located under the kernel/sched/ directory.
+
+Sysfs files:
+
+* rate_limit_us:
+
+  This contains a value in microseconds. The governor waits for
+  rate_limit_us time before reevaluating the load again, after it has
+  evaluated the load once.
+
+For an in-depth comparison with the other governors refer to [2].
+
 
 3. The Governor Interface in the CPUfreq Core
 =============================================
@@ -225,26 +267,10 @@ A new governor must register itself with the CPUfreq core using
 "cpufreq_register_governor". The struct cpufreq_governor, which has to
 be passed to that function, must contain the following values:
 
-governor->name -           A unique name for this governor
-governor->governor -       The governor callback function
-governor->owner        -           .THIS_MODULE for the governor module (if 
-                           appropriate)
-
-The governor->governor callback is called with the current (or to-be-set)
-cpufreq_policy struct for that CPU, and an unsigned int event. The
-following events are currently defined:
-
-CPUFREQ_GOV_START:   This governor shall start its duty for the CPU
-                    policy->cpu
-CPUFREQ_GOV_STOP:    This governor shall end its duty for the CPU
-                    policy->cpu
-CPUFREQ_GOV_LIMITS:  The limits for CPU policy->cpu have changed to
-                    policy->min and policy->max.
-
-If you need other "events" externally of your driver, _only_ use the
-cpufreq_governor_l(unsigned int cpu, unsigned int event) call to the
-CPUfreq core to ensure proper locking.
+governor->name - A unique name for this governor.
+governor->owner - .THIS_MODULE for the governor module (if appropriate).
 
+plus a set of hooks to the functions implementing the governor's logic.
 
 The CPUfreq governor may call the CPU processor driver using one of
 these two functions:
@@ -258,12 +284,18 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
                                    unsigned int relation);
 
 target_freq must be within policy->min and policy->max, of course.
-What's the difference between these two functions? When your governor
-still is in a direct code path of a call to governor->governor, the
-per-CPU cpufreq lock is still held in the cpufreq core, and there's
-no need to lock it again (in fact, this would cause a deadlock). So
-use __cpufreq_driver_target only in these cases. In all other cases 
-(for example, when there's a "daemonized" function that wakes up 
-every second), use cpufreq_driver_target to lock the cpufreq per-CPU
-lock before the command is passed to the cpufreq processor driver.
+What's the difference between these two functions? When your governor is
+in a direct code path of a call to governor callbacks, like
+governor->start(), the policy->rwsem is still held in the cpufreq core,
+and there's no need to lock it again (in fact, this would cause a
+deadlock). So use __cpufreq_driver_target only in these cases. In all
+other cases (for example, when there's a "daemonized" function that
+wakes up every second), use cpufreq_driver_target to take policy->rwsem
+before the command is passed to the cpufreq driver.
+
+4. References
+=============
+
+[1] Per-entity load tracking: https://lwn.net/Articles/531853/
+[2] Improvements in CPU frequency management: https://lwn.net/Articles/682391/
 
index dc024ab4054fc9d3728f0cda79cc9891e4178544..ef1d39247b054f332fc671bb0f781146866258c3 100644 (file)
 
 Documents in this directory:
 ----------------------------
+
+amd-powernow.txt -     AMD powernow driver specific file.
+
+boost.txt -            Frequency boosting support.
+
 core.txt       -       General description of the CPUFreq core and
-                       of CPUFreq notifiers
+                       of CPUFreq notifiers.
+
+cpu-drivers.txt -      How to implement a new cpufreq processor driver.
 
-cpu-drivers.txt -      How to implement a new cpufreq processor driver
+cpufreq-nforce2.txt -  nVidia nForce2 platform specific file.
+
+cpufreq-stats.txt -    General description of sysfs cpufreq stats.
 
 governors.txt  -       What are cpufreq governors and how to
                        implement them?
 
 index.txt      -       File index, Mailing list and Links (this document)
 
+intel-pstate.txt -     Intel pstate cpufreq driver specific file.
+
+pcc-cpufreq.txt -      PCC cpufreq driver specific file.
+
 user-guide.txt -       User Guide to CPUFreq
 
 
@@ -35,9 +48,7 @@ Mailing List
 ------------
 There is a CPU frequency changing CVS commit and general list where
 you can report bugs, problems or submit patches. To post a message,
-send an email to linux-pm@vger.kernel.org, to subscribe go to
-http://vger.kernel.org/vger-lists.html#linux-pm and follow the
-instructions there.
+send an email to linux-pm@vger.kernel.org.
 
 Links
 -----
@@ -48,7 +59,7 @@ how to access the CVS repository:
 * http://cvs.arm.linux.org.uk/
 
 the CPUFreq Mailing list:
-* http://vger.kernel.org/vger-lists.html#cpufreq
+* http://vger.kernel.org/vger-lists.html#linux-pm
 
 Clock and voltage scaling for the SA-1100:
 * http://www.lartmaker.nl/projects/scaling
index 1953994ef5e6bfe4b5897bdd427b087f361c63e0..3fdcdfd968ba1237ef61856c51463d123b9321a1 100644 (file)
@@ -85,6 +85,21 @@ Sysfs will show :
 Refer to "Intel® 64 and IA-32 Architectures Software Developer’s Manual
 Volume 3: System Programming Guide" to understand ratios.
 
+There is one more sysfs attribute in /sys/devices/system/cpu/intel_pstate/
+that can be used for controlling the operation mode of the driver:
+
+      status: Three settings are possible:
+      "off"     - The driver is not in use at this time.
+      "active"  - The driver works as a P-state governor (default).
+      "passive" - The driver works as a regular cpufreq one and collaborates
+                  with the generic cpufreq governors (it sets P-states as
+                  requested by those governors).
+      The current setting is returned by reads from this attribute.  Writing one
+      of the above strings to it changes the operation mode as indicated by that
+      string, if possible.  If HW-managed P-states (HWP) are enabled, it is not
+      possible to change the driver's operation mode and attempts to write to
+      this attribute will fail.
+
 cpufreq sysfs for Intel P-State
 
 Since this driver registers with cpufreq, cpufreq sysfs is also presented.
index 109e97bbab7717e8b9dd721b47a992ba5773ea31..107f6fdd7d14b126016dd9a8179452900d0d5b97 100644 (file)
@@ -18,7 +18,7 @@
 Contents:
 ---------
 1. Supported Architectures and Processors
-1.1 ARM
+1.1 ARM and ARM64
 1.2 x86
 1.3 sparc64
 1.4 ppc
@@ -37,16 +37,10 @@ Contents:
 1. Supported Architectures and Processors
 =========================================
 
-1.1 ARM
--------
-
-The following ARM processors are supported by cpufreq:
-
-ARM Integrator
-ARM-SA1100
-ARM-SA1110
-Intel PXA
+1.1 ARM and ARM64
+-----------------
 
+Almost all ARM and ARM64 platforms support CPU frequency scaling.
 
 1.2 x86
 -------
@@ -69,6 +63,7 @@ Transmeta Crusoe
 Transmeta Efficeon
 VIA Cyrix 3 / C3
 various processors on some ACPI 2.0-compatible systems [*]
+And many more
 
 [*] Only if "ACPI Processor Performance States" are available
 to the ACPI<->BIOS interface.
@@ -147,10 +142,19 @@ mounted it at /sys, the cpufreq interface is located in a subdirectory
 "cpufreq" within the cpu-device directory
 (e.g. /sys/devices/system/cpu/cpu0/cpufreq/ for the first CPU).
 
+affected_cpus :                        List of Online CPUs that require software
+                               coordination of frequency.
+
+cpuinfo_cur_freq :             Current frequency of the CPU as obtained from
+                               the hardware, in KHz. This is the frequency
+                               the CPU actually runs at.
+
 cpuinfo_min_freq :             this file shows the minimum operating
                                frequency the processor can run at(in kHz) 
+
 cpuinfo_max_freq :             this file shows the maximum operating
                                frequency the processor can run at(in kHz) 
+
 cpuinfo_transition_latency     The time it takes on this CPU to
                                switch between two frequencies in nano
                                seconds. If unknown or known to be
@@ -163,25 +167,30 @@ cpuinfo_transition_latency        The time it takes on this CPU to
                                userspace daemon. Make sure to not
                                switch the frequency too often
                                resulting in performance loss.
-scaling_driver :               this file shows what cpufreq driver is
-                               used to set the frequency on this CPU
+
+related_cpus :                 List of Online + Offline CPUs that need software
+                               coordination of frequency.
+
+scaling_available_frequencies : List of available frequencies, in KHz.
 
 scaling_available_governors :  this file shows the CPUfreq governors
                                available in this kernel. You can see the
                                currently activated governor in
 
+scaling_cur_freq :             Current frequency of the CPU as determined by
+                               the governor and cpufreq core, in KHz. This is
+                               the frequency the kernel thinks the CPU runs
+                               at.
+
+scaling_driver :               this file shows what cpufreq driver is
+                               used to set the frequency on this CPU
+
 scaling_governor,              and by "echoing" the name of another
                                governor you can change it. Please note
                                that some governors won't load - they only
                                work on some specific architectures or
                                processors.
 
-cpuinfo_cur_freq :             Current frequency of the CPU as obtained from
-                               the hardware, in KHz. This is the frequency
-                               the CPU actually runs at.
-
-scaling_available_frequencies : List of available frequencies, in KHz.
-
 scaling_min_freq and
 scaling_max_freq               show the current "policy limits" (in
                                kHz). By echoing new values into these
@@ -190,16 +199,11 @@ scaling_max_freq          show the current "policy limits" (in
                                first set scaling_max_freq, then
                                scaling_min_freq.
 
-affected_cpus :                        List of Online CPUs that require software
-                               coordination of frequency.
-
-related_cpus :                 List of Online + Offline CPUs that need software
-                               coordination of frequency.
-
-scaling_cur_freq :             Current frequency of the CPU as determined by
-                               the governor and cpufreq core, in KHz. This is
-                               the frequency the kernel thinks the CPU runs
-                               at.
+scaling_setspeed               This can be read to get the value currently
+                               programmed by the governor. This can be written to
+                               change the current frequency for a group of
+                               CPUs, represented by a policy. This is supported
+                               currently only by the userspace governor.
 
 bios_limit :                   If the BIOS tells the OS to limit a CPU to
                                lower frequencies, the user can read out the
index ad440a2b8051cfadad0f3d89c7cc76cb99b5508c..e926aea1147d49a06aadb5bc39cd4802afba7053 100644 (file)
@@ -31,6 +31,12 @@ to deliver its interrupts via SPIs.
   This also affects writes to the tval register, due to the implicit
   counter read.
 
+- hisilicon,erratum-161010101 : A boolean property. Indicates the
+  presence of Hisilicon erratum 161010101, which says that reading the
+  counters is unreliable in some cases, and reads may return a value 32
+  beyond the correct value. This also affects writes to the tval
+  registers, due to the implicit counter read.
+
 ** Optional properties:
 
 - arm,cpu-registers-not-fw-configured : Firmware does not initialize
diff --git a/Documentation/devicetree/bindings/cpufreq/ti-cpufreq.txt b/Documentation/devicetree/bindings/cpufreq/ti-cpufreq.txt
new file mode 100644 (file)
index 0000000..ba0e15a
--- /dev/null
@@ -0,0 +1,128 @@
+TI CPUFreq and OPP bindings
+================================
+
+Certain TI SoCs, like those in the am335x, am437x, am57xx, and dra7xx
+families support different OPPs depending on the silicon variant in use.
+The ti-cpufreq driver can use the revision and an eFuse value from the SoC to
+provide the OPP framework with supported hardware information. This is
+used to determine which OPPs from the operating-points-v2 table get enabled
+when it is parsed by the OPP framework.
+
+Required properties:
+--------------------
+In 'cpus' nodes:
+- operating-points-v2: Phandle to the operating-points-v2 table to use.
+
+In 'operating-points-v2' table:
+- compatible: Should be
+       - 'operating-points-v2-ti-cpu' for am335x, am43xx, and dra7xx/am57xx SoCs
+- syscon: A phandle pointing to a syscon node representing the control module
+         register space of the SoC.
+
+Optional properties:
+--------------------
+For each opp entry in 'operating-points-v2' table:
+- opp-supported-hw: Two bitfields indicating:
+       1. Which revision of the SoC the OPP is supported by
+       2. Which eFuse bits indicate this OPP is available
+
+       A bitwise AND is performed against these values and if any bit
+       matches, the OPP gets enabled.
+
+Example:
+--------
+
+/* From arch/arm/boot/dts/am33xx.dtsi */
+cpus {
+       #address-cells = <1>;
+       #size-cells = <0>;
+       cpu@0 {
+               compatible = "arm,cortex-a8";
+               device_type = "cpu";
+               reg = <0>;
+
+               operating-points-v2 = <&cpu0_opp_table>;
+
+               clocks = <&dpll_mpu_ck>;
+               clock-names = "cpu";
+
+               clock-latency = <300000>; /* From omap-cpufreq driver */
+       };
+};
+
+/*
+ * cpu0 has different OPPs depending on SoC revision and some on revisions
+ * 0x2 and 0x4 have eFuse bits that indicate if they are available or not
+ */
+cpu0_opp_table: opp-table {
+       compatible = "operating-points-v2-ti-cpu";
+       syscon = <&scm_conf>;
+
+       /*
+        * The following three nodes are marked with opp-suspend
+        * because they cannot be enabled simultaneously on a
+        * single SoC.
+        */
+       opp50@300000000 {
+               opp-hz = /bits/ 64 <300000000>;
+               opp-microvolt = <950000 931000 969000>;
+               opp-supported-hw = <0x06 0x0010>;
+               opp-suspend;
+       };
+
+       opp100@275000000 {
+               opp-hz = /bits/ 64 <275000000>;
+               opp-microvolt = <1100000 1078000 1122000>;
+               opp-supported-hw = <0x01 0x00FF>;
+               opp-suspend;
+       };
+
+       opp100@300000000 {
+               opp-hz = /bits/ 64 <300000000>;
+               opp-microvolt = <1100000 1078000 1122000>;
+               opp-supported-hw = <0x06 0x0020>;
+               opp-suspend;
+       };
+
+       opp100@500000000 {
+               opp-hz = /bits/ 64 <500000000>;
+               opp-microvolt = <1100000 1078000 1122000>;
+               opp-supported-hw = <0x01 0xFFFF>;
+       };
+
+       opp100@600000000 {
+               opp-hz = /bits/ 64 <600000000>;
+               opp-microvolt = <1100000 1078000 1122000>;
+               opp-supported-hw = <0x06 0x0040>;
+       };
+
+       opp120@600000000 {
+               opp-hz = /bits/ 64 <600000000>;
+               opp-microvolt = <1200000 1176000 1224000>;
+               opp-supported-hw = <0x01 0xFFFF>;
+       };
+
+       opp120@720000000 {
+               opp-hz = /bits/ 64 <720000000>;
+               opp-microvolt = <1200000 1176000 1224000>;
+               opp-supported-hw = <0x06 0x0080>;
+       };
+
+       oppturbo@720000000 {
+               opp-hz = /bits/ 64 <720000000>;
+               opp-microvolt = <1260000 1234800 1285200>;
+               opp-supported-hw = <0x01 0xFFFF>;
+       };
+
+       oppturbo@800000000 {
+               opp-hz = /bits/ 64 <800000000>;
+               opp-microvolt = <1260000 1234800 1285200>;
+               opp-supported-hw = <0x06 0x0100>;
+       };
+
+       oppnitro@1000000000 {
+               opp-hz = /bits/ 64 <1000000000>;
+               opp-microvolt = <1325000 1298500 1351500>;
+               opp-supported-hw = <0x04 0x0200>;
+       };
+};
index d3ec8e676b6bf306309b42bdd4678403a1682c82..d085ef90d27c1f8b82645177169f71c3eb42d00f 100644 (file)
@@ -123,6 +123,20 @@ Detailed correlation between sub-blocks and power line according to Exynos SoC:
                |--- FSYS
                |--- FSYS2
 
+- In case of Exynos5433, there is VDD_INT power line as following:
+       VDD_INT |--- G2D (parent device)
+               |--- MSCL
+               |--- GSCL
+               |--- JPEG
+               |--- MFC
+               |--- HEVC
+               |--- BUS0
+               |--- BUS1
+               |--- BUS2
+               |--- PERIS (Fixed clock rate)
+               |--- PERIC (Fixed clock rate)
+               |--- FSYS  (Fixed clock rate)
+
 Example1:
        Show the AXI buses of Exynos3250 SoC. Exynos3250 divides the buses to
        power line (regulator). The MIF (Memory Interface) AXI bus is used to
diff --git a/Documentation/devicetree/bindings/hwmon/adc128d818.txt b/Documentation/devicetree/bindings/hwmon/adc128d818.txt
new file mode 100644 (file)
index 0000000..08bab0e
--- /dev/null
@@ -0,0 +1,38 @@
+TI ADC128D818 ADC System Monitor With Temperature Sensor
+--------------------------------------------------------
+
+Operation modes:
+
+ - Mode 0:  7 single-ended voltage readings (IN0-IN6),
+            1 temperature reading (internal)
+ - Mode 1:  8 single-ended voltage readings (IN0-IN7),
+            no temperature
+ - Mode 2:  4 pseudo-differential voltage readings
+              (IN0-IN1, IN3-IN2, IN4-IN5, IN7-IN6),
+            1 temperature reading (internal)
+ - Mode 3:  4 single-ended voltage readings (IN0-IN3),
+            2 pseudo-differential voltage readings
+              (IN4-IN5, IN7-IN6),
+            1 temperature reading (internal)
+
+If no operation mode is configured via device tree, the driver keeps the
+currently active chip operation mode (default is mode 0).
+
+
+Required node properties:
+
+ - compatible:  must be set to "ti,adc128d818"
+ - reg:         I2C address of the device
+
+Optional node properties:
+
+ - ti,mode:     Operation mode (see above).
+
+
+Example (operation mode 2):
+
+       adc128d818@1d {
+               compatible = "ti,adc128d818";
+               reg = <0x1d>;
+               ti,mode = <2>;
+       };
index e7fd921aa4f1a93f1ccbc8d658ddc9ab30625953..ea417a0d32af52adf5bd2ee63b3ee3060ae10c96 100644 (file)
@@ -4,6 +4,7 @@ Required properties:
 - compatible: one of
                "ti,lm70"
                "ti,tmp121"
+               "ti,tmp122"
                "ti,lm71"
                "ti,lm74"
 
index e8632486b9ef5166801a0eaaecefc99a4c534ab0..97581266e329730fa7260be8d1e14473f1c9dbc0 100644 (file)
@@ -33,6 +33,11 @@ Optional properties:
               LM90 "-ALERT" pin output.
               See interrupt-controller/interrupts.txt for the format.
 
+- #thermal-sensor-cells: should be set to 1. See thermal/thermal.txt for
+             details. See <include/dt-bindings/thermal/lm90.h> for the
+             definition of the local, remote and 2nd remote sensor index
+             constants.
+
 Example LM90 node:
 
 temp-sensor {
@@ -41,4 +46,5 @@ temp-sensor {
        vcc-supply = <&palmas_ldo6_reg>;
        interrupt-parent = <&gpio>;
        interrupts = <TEGRA_GPIO(O, 4) IRQ_TYPE_LEVEL_LOW>;
+       #thermal-sensor-cells = <1>;
 }
diff --git a/Documentation/devicetree/bindings/hwmon/sht15.txt b/Documentation/devicetree/bindings/hwmon/sht15.txt
new file mode 100644 (file)
index 0000000..6a80277
--- /dev/null
@@ -0,0 +1,19 @@
+Sensirion SHT15 Humidity and Temperature Sensor
+
+Required properties:
+
+ - "compatible": must be "sensirion,sht15".
+ - "data-gpios": GPIO connected to the data line.
+ - "clk-gpios": GPIO connected to the clock line.
+ - "vcc-supply": regulator that drives the VCC pin.
+
+Example:
+
+       sensor {
+               pinctrl-names = "default";
+               pinctrl-0 = <&pinctrl_sensor>;
+               compatible = "sensirion,sht15";
+               clk-gpios = <&gpio4 12 0>;
+               data-gpios = <&gpio4 13 0>;
+               vcc-supply = <&reg_sht15>;
+       };
diff --git a/Documentation/devicetree/bindings/hwmon/stts751.txt b/Documentation/devicetree/bindings/hwmon/stts751.txt
new file mode 100644 (file)
index 0000000..3ee1dc3
--- /dev/null
@@ -0,0 +1,15 @@
+* STTS751 thermometer.
+
+Required node properties:
+- compatible: "stts751"
+- reg: I2C bus address of the device
+
+Optional properties:
+- smbus-timeout-disable: when set, the smbus timeout function will be disabled
+
+Example stts751 node:
+
+temp-sensor {
+       compatible = "stts751";
+       reg = <0x48>;
+}
diff --git a/Documentation/devicetree/bindings/interrupt-controller/cortina,gemini-interrupt-controller.txt b/Documentation/devicetree/bindings/interrupt-controller/cortina,gemini-interrupt-controller.txt
new file mode 100644 (file)
index 0000000..97c1167
--- /dev/null
@@ -0,0 +1,22 @@
+* Cortina Systems Gemini interrupt controller
+
+This interrupt controller is found on the Gemini SoCs.
+
+Required properties:
+- compatible: must be "cortina,gemini-interrupt-controller"
+- reg: The register bank for the interrupt controller.
+- interrupt-controller: Identifies the node as an interrupt controller
+- #interrupt-cells: The number of cells to define the interrupts.
+  Must be 2 as the controller can specify level or rising edge
+  IRQs. The bindings follows the standard binding for controllers
+  with two cells specified in
+  interrupt-controller/interrupts.txt
+
+Example:
+
+interrupt-controller@48000000 {
+       compatible = "cortina,gemini-interrupt-controller";
+       reg = <0x48000000 0x1000>;
+       interrupt-controller;
+       #interrupt-cells = <2>;
+};
index 0dcb7c7d3e40bdda3bea51c03729808ae2ac1427..944657684d73cc009bb42b0f7b50eeb1adb7f044 100644 (file)
@@ -15,6 +15,9 @@ Properties:
   Second cell specifies the irq distribution mode to cores
      0=Round Robin; 1=cpu0, 2=cpu1, 4=cpu2, 8=cpu3
 
+  The second cell in interrupts property is deprecated and may be ignored by
+  the kernel.
+
   intc accessed via the special ARC AUX register interface, hence "reg" property
   is not specified.
 
index 696be57926257817e061e5e21657ab4939f38345..24b6560140899aa8e956efa942f2074873e1f26b 100644 (file)
@@ -61,16 +61,24 @@ property can be omitted.
 
 Examples:
 
-system-status {
-       label = "Status";
-       linux,default-trigger = "heartbeat";
-       ...
+gpio-leds {
+       compatible = "gpio-leds";
+
+       system-status {
+               label = "Status";
+               linux,default-trigger = "heartbeat";
+               gpios = <&gpio0 0 GPIO_ACTIVE_HIGH>;
+       };
 };
 
-camera-flash {
-       label = "Flash";
-       led-sources = <0>, <1>;
-       led-max-microamp = <50000>;
-       flash-max-microamp = <320000>;
-       flash-max-timeout-us = <500000>;
+max77693-led {
+       compatible = "maxim,max77693-led";
+
+       camera-flash {
+               label = "Flash";
+               led-sources = <0>, <1>;
+               led-max-microamp = <50000>;
+               flash-max-microamp = <320000>;
+               flash-max-timeout-us = <500000>;
+       };
 };
diff --git a/Documentation/devicetree/bindings/mtd/aspeed-smc.txt b/Documentation/devicetree/bindings/mtd/aspeed-smc.txt
new file mode 100644 (file)
index 0000000..49f6528
--- /dev/null
@@ -0,0 +1,51 @@
+* Aspeed Firmware Memory controller
+* Aspeed SPI Flash Memory Controller
+
+The Firmware Memory Controller in the Aspeed AST2500 SoC supports
+three chip selects, two of which are always of SPI type and the third
+can be SPI or NOR type flash. These bindings only describe SPI.
+
+The two SPI flash memory controllers in the AST2500 each support two
+chip selects.
+
+Required properties:
+  - compatible : Should be one of
+       "aspeed,ast2400-fmc" for the AST2400 Firmware Memory Controller
+       "aspeed,ast2400-spi" for the AST2400 SPI Flash memory Controller
+       "aspeed,ast2500-fmc" for the AST2500 Firmware Memory Controller
+       "aspeed,ast2500-spi" for the AST2500 SPI flash memory controllers
+
+  - reg : the first contains the control register location and length,
+          the second contains the memory window mapping address and length
+  - #address-cells : must be 1 corresponding to chip select child binding
+  - #size-cells : must be 0 corresponding to chip select child binding
+
+Optional properties:
+  - interrupts : Should contain the interrupt for the DMA device if the
+    controller is an FMC
+
+The child nodes are the SPI flash modules which must have a compatible
+property as specified in bindings/mtd/jedec,spi-nor.txt
+
+Optionally, the child node can contain properties for SPI mode (may be
+ignored):
+  - spi-max-frequency - max frequency of spi bus
+
+
+Example:
+fmc: fmc@1e620000 {
+       compatible = "aspeed,ast2500-fmc";
+       reg = < 0x1e620000 0x94
+               0x20000000 0x02000000 >;
+       #address-cells = <1>;
+       #size-cells = <0>;
+       interrupts = <19>;
+       flash@0 {
+               reg = < 0 >;
+               compatible = "jedec,spi-nor";
+               /* spi-max-frequency = <>; */
+               /* m25p,fast-read; */
+               #address-cells = <1>;
+               #size-cells = <1>;
+       };
+};
diff --git a/Documentation/devicetree/bindings/mtd/common.txt b/Documentation/devicetree/bindings/mtd/common.txt
new file mode 100644 (file)
index 0000000..fc068b9
--- /dev/null
@@ -0,0 +1,15 @@
+* Common properties of all MTD devices
+
+Optional properties:
+- label: user-defined MTD device name. Can be used to assign user
+  friendly names to MTD devices (instead of the flash model or flash
+  controller based name) in order to ease flash device identification
+  and/or describe what they are used for.
+
+Example:
+
+       flash@0 {
+               label = "System-firmware";
+
+               /* flash type specific properties */
+       };
diff --git a/Documentation/devicetree/bindings/mtd/cortina,gemini-flash.txt b/Documentation/devicetree/bindings/mtd/cortina,gemini-flash.txt
new file mode 100644 (file)
index 0000000..3fa1b34
--- /dev/null
@@ -0,0 +1,24 @@
+Flash device on Cortina Systems Gemini SoC
+
+This flash is regular CFI compatible (Intel or AMD extended) flash chips with
+some special bits that can be controlled by the machine's system controller.
+
+Required properties:
+- compatible : must be "cortina,gemini-flash", "cfi-flash";
+- reg : memory address for the flash chip
+- syscon : must be a phandle to the system controller
+- bank-width : width in bytes of flash interface, should be <2>
+
+For the rest of the properties, see mtd-physmap.txt.
+
+The device tree may optionally contain sub-nodes describing partitions of the
+address space. See partition.txt for more detail.
+
+Example:
+
+flash@30000000 {
+       compatible = "cortina,gemini-flash", "cfi-flash";
+       reg = <0x30000000 0x01000000>;
+       syscon = <&syscon>;
+       bank-width = <2>;
+};
index 2c91c03e7eb0265e1c7db5edc4d548600555e613..3e920ec5c4d36d48148e069ccb9de617dbee2326 100644 (file)
@@ -14,6 +14,8 @@ Required properties:
                  at25df641
                  at26df081a
                  mr25h256
+                 mr25h10
+                 mr25h40
                  mx25l4005a
                  mx25l1606e
                  mx25l6405d
index fb314f09861b81334496dbe2aaec08e0d7102913..5ded66ad7aef762c91b9436ec400380d505210af 100644 (file)
@@ -1,7 +1,13 @@
 * Serial NOR flash controller for MTK MT81xx (and similar)
 
 Required properties:
-- compatible:    should be "mediatek,mt8173-nor";
+- compatible:    The possible values are:
+                 "mediatek,mt2701-nor"
+                 "mediatek,mt7623-nor"
+                 "mediatek,mt8173-nor"
+                 For mt8173, compatible should be "mediatek,mt8173-nor".
+                 For every other SoC, should contain both the SoC-specific compatible string
+                 and "mediatek,mt8173-nor".
 - reg:                   physical base address and length of the controller's register
 - clocks:        the phandle of the clocks needed by the nor controller
 - clock-names:           the names of the clocks
index c010fafc66a8e4d6f0b1543104ac352f82ec992f..c7194e87d5f4b7d9b61927a430479e39f52ddca8 100644 (file)
@@ -7,7 +7,7 @@ have dual GMAC each represented by a child node..
 * Ethernet controller node
 
 Required properties:
-- compatible: Should be "mediatek,mt7623-eth"
+- compatible: Should be "mediatek,mt2701-eth"
 - reg: Address and length of the register set for the device
 - interrupts: Should contain the three frame engines interrupts in numeric
        order. These are fe_int0, fe_int1 and fe_int2.
index ff1bc4b1bb3b5e1d91a1747fcc73c24d587cd1a8..fb5056b22685c249c9bf812a1ada038070d8c03d 100644 (file)
@@ -19,8 +19,9 @@ Optional Properties:
   specifications. If neither of these are specified, the default is to
   assume clause 22.
 
-  If the phy's identifier is known then the list may contain an entry
-  of the form: "ethernet-phy-idAAAA.BBBB" where
+  If the PHY reports an incorrect ID (or none at all) then the
+  "compatible" list may contain an entry with the correct PHY ID in the
+  form: "ethernet-phy-idAAAA.BBBB" where
      AAAA - The value of the 16 bit Phy Identifier 1 register as
             4 hex digits. This is the chip vendor OUI bits 3:18
      BBBB - The value of the 16 bit Phy Identifier 2 register as
diff --git a/Documentation/devicetree/bindings/power/supply/axp20x_ac_power.txt b/Documentation/devicetree/bindings/power/supply/axp20x_ac_power.txt
new file mode 100644 (file)
index 0000000..826e8a8
--- /dev/null
@@ -0,0 +1,22 @@
+AXP20X and AXP22X PMICs' AC power supply
+
+Required Properties:
+ - compatible: One of:
+                       "x-powers,axp202-ac-power-supply"
+                       "x-powers,axp221-ac-power-supply"
+
+This node is a subnode of the axp20x PMIC.
+
+The AXP20X can report the current and voltage currently supplied by AC
+by reading ADC channels from the AXP20X ADC.
+
+The AXP22X is only able to tell if an AC power supply is present and
+usable.
+
+Example:
+
+&axp209 {
+       ac_power_supply: ac-power-supply {
+               compatible = "x-powers,axp202-ac-power-supply";
+       };
+};
index f1d7beec45bf69803f57d1b2e320d3e20f155a2b..ba8d35f66cbe292e4ee42dd22b64c621467430dc 100644 (file)
@@ -3,6 +3,11 @@ AXP20x USB power supply
 Required Properties:
 -compatible: One of: "x-powers,axp202-usb-power-supply"
                      "x-powers,axp221-usb-power-supply"
+                     "x-powers,axp223-usb-power-supply"
+
+The AXP223 PMIC shares most of its behaviour with the AXP221 but has slight
+variations such as the former being able to set the VBUS power supply max
+current to 100mA, unlike the latter.
 
 This node is a subnode of the axp20x PMIC.
 
diff --git a/Documentation/devicetree/bindings/power/supply/bq27xxx.txt b/Documentation/devicetree/bindings/power/supply/bq27xxx.txt
new file mode 100644 (file)
index 0000000..b0c95ef
--- /dev/null
@@ -0,0 +1,36 @@
+Binding for TI BQ27XXX fuel gauge family
+
+Required properties:
+- compatible: Should contain one of the following:
+ * "ti,bq27200" - BQ27200
+ * "ti,bq27210" - BQ27210
+ * "ti,bq27500" - deprecated, use revision specific property below
+ * "ti,bq27510" - deprecated, use revision specific property below
+ * "ti,bq27520" - deprecated, use revision specific property below
+ * "ti,bq27500-1" - BQ27500/1
+ * "ti,bq27510g1" - BQ27510-g1
+ * "ti,bq27510g2" - BQ27510-g2
+ * "ti,bq27510g3" - BQ27510-g3
+ * "ti,bq27520g1" - BQ27520-g1
+ * "ti,bq27520g2" - BQ27520-g2
+ * "ti,bq27520g3" - BQ27520-g3
+ * "ti,bq27520g4" - BQ27520-g4
+ * "ti,bq27530" - BQ27530
+ * "ti,bq27531" - BQ27531
+ * "ti,bq27541" - BQ27541
+ * "ti,bq27542" - BQ27542
+ * "ti,bq27546" - BQ27546
+ * "ti,bq27742" - BQ27742
+ * "ti,bq27545" - BQ27545
+ * "ti,bq27421" - BQ27421
+ * "ti,bq27425" - BQ27425
+ * "ti,bq27441" - BQ27441
+ * "ti,bq27621" - BQ27621
+- reg: integer, i2c address of the device.
+
+Example:
+
+bq27510g3 {
+    compatible = "ti,bq27510g3";
+    reg = <0x55>;
+};
index 65b88fac854be0f3946269e38ce0b5d395d6288a..06f8a5ddb68ef8470f21c28c69f0a5386338e7c8 100644 (file)
@@ -105,6 +105,22 @@ PROPERTIES
                regulation must be done externally to fully comply with
                the JEITA safety guidelines if this flag is set.
 
+- usb_otg_in-supply:
+  Usage: optional
+  Value type: <phandle>
+  Description: Reference to the regulator supplying power to the USB_OTG_IN
+               pin.
+
+child nodes:
+- otg-vbus:
+  Usage: optional
+  Description: This node defines a regulator used to control the direction
+               of VBUS voltage - specifically: whether to supply voltage
+               to VBUS for host mode operation of the OTG port, or allow
+               input voltage from external VBUS for charging.  In the
+               hardware, the supply for this regulator comes from
+               usb_otg_in-supply.
+
 EXAMPLE
 charger@1000 {
        compatible = "qcom,pm8941-charger";
@@ -128,4 +144,7 @@ charger@1000 {
 
        qcom,fast-charge-current-limit = <1000000>;
        qcom,dc-charge-current-limit = <1000000>;
+       usb_otg_in-supply = <&pm8941_5vs1>;
+
+       otg-vbus {};
 };
diff --git a/Documentation/devicetree/bindings/power/supply/sbs_sbs-charger.txt b/Documentation/devicetree/bindings/power/supply/sbs_sbs-charger.txt
new file mode 100644 (file)
index 0000000..a371962
--- /dev/null
@@ -0,0 +1,23 @@
+SBS sbs-charger
+~~~~~~~~~~~~~~~
+
+Required properties:
+ - compatible: "<vendor>,<part-number>", "sbs,sbs-charger" as fallback. The part
+     number compatible string might be used in order to take care of vendor
+     specific registers.
+
+Optional properties:
+- interrupt-parent: Should be the phandle for the interrupt controller. Use in
+    conjunction with "interrupts".
+- interrupts: Interrupt mapping for GPIO IRQ. Use in conjunction with
+    "interrupt-parent". If an interrupt is not provided the driver will switch
+    automatically to polling.
+
+Example:
+
+       ltc4100@9 {
+               compatible = "lltc,ltc4100", "sbs,sbs-charger";
+               reg = <0x9>;
+               interrupt-parent = <&gpio6>;
+               interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+       };
index 3bf55757ceece73edf1b23f67204e6d1a103f4f2..de45e1a2a4d9e3e35851255316e7574ea187cbe2 100644 (file)
@@ -8,8 +8,10 @@ Optional properties :
  - interrupts : Specify the interrupt to be used to trigger when the AC
    adapter is either plugged in or removed.
  - ti,ac-detect-gpios : This GPIO is optionally used to read the AC adapter
-   presence. This is a Host GPIO that is configured as an input and
-   connected to the bq24735.
+   status. This is a Host GPIO that is configured as an input and connected
+   to the ACOK pin on the bq24735. Note: for backwards compatibility reasons,
+   the GPIO must be active on AC adapter absence despite ACOK being active
+   (high) on AC adapter presence.
  - ti,charge-current : Used to control and set the charging current. This value
    must be between 128mA and 8.128A with a 64mA step resolution. The POR value
    is 0x0000h. This number is in mA (e.g. 8192), see spec for more information
@@ -25,6 +27,8 @@ Optional properties :
  - ti,external-control : Indicates that the charger is configured externally
    and that the host should not attempt to enable/disable charging or set the
    charge voltage/current.
+ - poll-interval : In case 'interrupts' is not specified, poll AC adapter
+   presence with this interval (milliseconds).
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/power_supply/maxim,max14656.txt b/Documentation/devicetree/bindings/power_supply/maxim,max14656.txt
new file mode 100644 (file)
index 0000000..e03e85a
--- /dev/null
@@ -0,0 +1,25 @@
+Maxim MAX14656 / AL32 USB Charger Detector
+
+Required properties :
+- compatible : "maxim,max14656";
+- reg: i2c slave address
+- interrupt-parent: the phandle for the interrupt controller
+- interrupts: interrupt line
+
+Example:
+
+&i2c2 {
+       clock-frequency = <50000>;
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_i2c2>;
+       status = "okay";
+
+       max14656@35 {
+               compatible = "maxim,max14656";
+               reg = <0x35>;
+               pinctrl-names = "default";
+               pinctrl-0 = <&pinctrl_charger_detect>;
+               interrupt-parent = <&gpio6>;
+               interrupts = <26 IRQ_TYPE_LEVEL_HIGH>;
+       };
+};
index 37c4ea076f88e694b773a6233e37c8dd68dc5192..1d58c8cfdbc01d8fffad426774dd730a987af2d1 100644 (file)
@@ -14,6 +14,7 @@ Optional properties:
 - anatop-delay-bit-shift: Bit shift for the step time register
 - anatop-delay-bit-width: Number of bits used in the step time register
 - vin-supply: The supply for this regulator
+- anatop-enable-bit: Regulator enable bit offset
 
 Any property defined as part of the core regulator
 binding, defined in regulator.txt, can also be used.
diff --git a/Documentation/devicetree/bindings/regulator/cpcap-regulator.txt b/Documentation/devicetree/bindings/regulator/cpcap-regulator.txt
new file mode 100644 (file)
index 0000000..675f443
--- /dev/null
@@ -0,0 +1,34 @@
+Motorola CPCAP PMIC voltage regulators
+------------------------------------
+
+Required node properties:
+- "compatible" value one of:
+    "motorola,cpcap-regulator"
+    "motorola,mapphone-cpcap-regulator"
+
+Required regulator properties:
+- "regulator-name"
+- "regulator-enable-ramp-delay"
+- "regulator-min-microvolt"
+- "regulator-max-microvolt"
+
+Optional regulator properties:
+- "regulator-boot-on"
+
+See Documentation/devicetree/bindings/regulator/regulator.txt
+for more details about the regulator properties.
+
+Example:
+
+cpcap_regulator: regulator {
+       compatible = "motorola,cpcap-regulator";
+
+       cpcap_regulators: regulators {
+               sw5: SW5 {
+                       regulator-min-microvolt = <5050000>;
+                       regulator-max-microvolt = <5050000>;
+                       regulator-enable-ramp-delay = <50000>;
+                       regulator-boot-on;
+               };
+       };
+};
index e5cac1e0ca8a734d43549ad125a837beef457e13..dd1ed789728e0702a1d1d39aecf223fc1f22ef71 100644 (file)
@@ -13,7 +13,7 @@ Optional properties:
 - startup-delay-us     : Startup time in microseconds.
 - enable-active-high   : Polarity of GPIO is active high (default is low).
 - regulator-type       : Specifies what is being regulated, must be either
-                         "voltage" or "current", defaults to current.
+                         "voltage" or "current", defaults to voltage.
 
 Any property defined as part of the core regulator binding defined in
 regulator.txt can also be used.
index 1f8d6f84b657d3f63257b9f0d999ba07732cbff0..4e3dfb5b5f1661b60784f86ef2a835b61a1ca8bc 100644 (file)
@@ -22,6 +22,7 @@ Regulator nodes are identified by their compatible:
                    "qcom,rpm-pm8841-regulators"
                    "qcom,rpm-pm8916-regulators"
                    "qcom,rpm-pm8941-regulators"
+                   "qcom,rpm-pm8994-regulators"
                    "qcom,rpm-pma8084-regulators"
 
 - vdd_s1-supply:
@@ -68,6 +69,56 @@ Regulator nodes are identified by their compatible:
        Definition: reference to regulator supplying the input pin, as
                    described in the data sheet
 
+- vdd_s1-supply:
+- vdd_s2-supply:
+- vdd_s3-supply:
+- vdd_s4-supply:
+- vdd_s5-supply:
+- vdd_s6-supply:
+- vdd_s7-supply:
+- vdd_s8-supply:
+- vdd_s9-supply:
+- vdd_s10-supply:
+- vdd_s11-supply:
+- vdd_s12-supply:
+- vdd_l1-supply:
+- vdd_l2_l26_l28-supply:
+- vdd_l3_l11-supply:
+- vdd_l4_l27_l31-supply:
+- vdd_l5_l7-supply:
+- vdd_l6_l12_l32-supply:
+- vdd_l5_l7-supply:
+- vdd_l8_l16_l30-supply:
+- vdd_l9_l10_l18_l22-supply:
+- vdd_l9_l10_l18_l22-supply:
+- vdd_l3_l11-supply:
+- vdd_l6_l12_l32-supply:
+- vdd_l13_l19_l23_l24-supply:
+- vdd_l14_l15-supply:
+- vdd_l14_l15-supply:
+- vdd_l8_l16_l30-supply:
+- vdd_l17_l29-supply:
+- vdd_l9_l10_l18_l22-supply:
+- vdd_l13_l19_l23_l24-supply:
+- vdd_l20_l21-supply:
+- vdd_l20_l21-supply:
+- vdd_l9_l10_l18_l22-supply:
+- vdd_l13_l19_l23_l24-supply:
+- vdd_l13_l19_l23_l24-supply:
+- vdd_l25-supply:
+- vdd_l2_l26_l28-supply:
+- vdd_l4_l27_l31-supply:
+- vdd_l2_l26_l28-supply:
+- vdd_l17_l29-supply:
+- vdd_l8_l16_l30-supply:
+- vdd_l4_l27_l31-supply:
+- vdd_l6_l12_l32-supply:
+- vdd_lvs1_2-supply:
+       Usage: optional (pm8994 only)
+       Value type: <phandle>
+       Definition: reference to regulator supplying the input pin, as
+                   described in the data sheet
+
 - vdd_s1-supply:
 - vdd_s2-supply:
 - vdd_s3-supply:
@@ -113,6 +164,11 @@ pm8941:
        l14, l15, l16, l17, l18, l19, l20, l21, l22, l23, l24, lvs1, lvs2,
        lvs3, 5vs1, 5vs2
 
+pm8994:
+       s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, l1, l2, l3, l4, l5,
+       l6, l7, l8, l9, l10, l11, l12, l13, l14, l15, l16, l17, l18, l19, l20,
+       l21, l22, l23, l24, l25, l26, l27, l28, l29, l30, l31, l32, lvs1, lvs2
+
 pma8084:
        s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, l1, l2, l3, l4, l5,
        l6, l7, l8, l9, l10, l11, l12, l13, l14, l15, l16, l17, l18, l19, l20,
diff --git a/Documentation/devicetree/bindings/spi/spi-lantiq-ssc.txt b/Documentation/devicetree/bindings/spi/spi-lantiq-ssc.txt
new file mode 100644 (file)
index 0000000..6069b95
--- /dev/null
@@ -0,0 +1,29 @@
+Lantiq Synchronous Serial Controller (SSC) SPI master driver
+
+Required properties:
+- compatible: "lantiq,ase-spi", "lantiq,falcon-spi", "lantiq,xrx100-spi"
+- #address-cells: see spi-bus.txt
+- #size-cells: see spi-bus.txt
+- reg: address and length of the spi master registers
+- interrupts: should contain the "spi_rx", "spi_tx" and "spi_err" interrupt.
+
+
+Optional properties:
+- clocks: spi clock phandle
+- num-cs: see spi-bus.txt, set to 8 if unset
+- base-cs: the number of the first chip select, set to 1 if unset.
+
+Example:
+
+
+spi: spi@E100800 {
+       compatible = "lantiq,xrx200-spi", "lantiq,xrx100-spi";
+       reg = <0xE100800 0x100>;
+       interrupt-parent = <&icu0>;
+       interrupts = <22 23 24>;
+       interrupt-names = "spi_rx", "spi_tx", "spi_err";
+       #address-cells = <1>;
+       #size-cells = <1>;
+       num-cs = <6>;
+       base-cs = <1>;
+};
index d2ca153614f912fcd06cca8e1e65090030eba96e..83da4931d832688f51e19a56aaa03af8482e721a 100644 (file)
@@ -31,6 +31,10 @@ Optional Properties:
 - rx-sample-delay-ns: nanoseconds to delay after the SCLK edge before sampling
                Rx data (may need to be fine tuned for high capacitance lines).
                No delay (0) by default.
+- pinctrl-names: Names for the pin configuration(s); may be "default" or
+               "sleep", where the "sleep" configuration may describe the state
+               the pins should be in during system suspend. See also
+               pinctrl/pinctrl-bindings.txt.
 
 
 Example:
@@ -46,4 +50,7 @@ Example:
                interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&cru SCLK_SPI0>, <&cru PCLK_SPI0>;
                clock-names = "spiclk", "apb_pclk";
+               pinctrl-0 = <&spi1_pins>;
+               pinctrl-1 = <&spi1_sleep>;
+               pinctrl-names = "default", "sleep";
        };
diff --git a/Documentation/devicetree/bindings/timer/cortina,gemini-timer.txt b/Documentation/devicetree/bindings/timer/cortina,gemini-timer.txt
new file mode 100644 (file)
index 0000000..16ea1d3
--- /dev/null
@@ -0,0 +1,22 @@
+Cortina Systems Gemini timer
+
+This timer is embedded in the Cortina Systems Gemini SoCs.
+
+Required properties:
+
+- compatible : Must be "cortina,gemini-timer"
+- reg : Should contain registers location and length
+- interrupts : Should contain the three timer interrupts with
+  flags for rising edge
+- syscon : a phandle to the global Gemini system controller
+
+Example:
+
+timer@43000000 {
+       compatible = "cortina,gemini-timer";
+       reg = <0x43000000 0x1000>;
+       interrupts = <14 IRQ_TYPE_EDGE_RISING>, /* Timer 1 */
+                  <15 IRQ_TYPE_EDGE_RISING>, /* Timer 2 */
+                  <16 IRQ_TYPE_EDGE_RISING>; /* Timer 3 */
+       syscon = <&syscon>;
+};
diff --git a/Documentation/devicetree/bindings/timer/renesas,ostm.txt b/Documentation/devicetree/bindings/timer/renesas,ostm.txt
new file mode 100644 (file)
index 0000000..be3ae0f
--- /dev/null
@@ -0,0 +1,30 @@
+* Renesas OS Timer (OSTM)
+
+The OSTM is a multi-channel 32-bit timer/counter with fixed clock
+source that can operate in either interval count down timer or free-running
+compare match mode.
+
+Channels are independent from each other.
+
+Required Properties:
+
+  - compatible: must be one or more of the following:
+    - "renesas,r7s72100-ostm" for the r7s72100 OSTM
+    - "renesas,ostm" for any OSTM
+               This is a fallback for the above renesas,*-ostm entries
+
+  - reg: base address and length of the register block for a timer channel.
+
+  - interrupts: interrupt specifier for the timer channel.
+
+  - clocks: clock specifier for the timer channel.
+
+Example: R7S72100 (RZ/A1H) OSTM node
+
+       ostm0: timer@fcfec000 {
+               compatible = "renesas,r7s72100-ostm", "renesas,ostm";
+               reg = <0xfcfec000 0x30>;
+               interrupts = <GIC_SPI 102 IRQ_TYPE_EDGE_RISING>;
+               clocks = <&mstp5_clks R7S72100_CLK_OSTM0>;
+               power-domains = <&cpg_clocks>;
+       };
index ca9d1eb46bc00e38f1f1250775ce43cac241e9a7..bf34d5b3a7330e5c26f9c6eebe977ed853804959 100644 (file)
@@ -306,6 +306,11 @@ IRQ
   devm_request_any_context_irq()
   devm_request_irq()
   devm_request_threaded_irq()
+  devm_irq_alloc_descs()
+  devm_irq_alloc_desc()
+  devm_irq_alloc_desc_at()
+  devm_irq_alloc_desc_from()
+  devm_irq_alloc_descs_from()
 
 LED
   devm_led_classdev_register()
index 72624a16b79284c0f2484741144e6b90ac084064..c94b4675d021ffd374de22d7d83df61dbb6c34dd 100644 (file)
@@ -212,10 +212,11 @@ asynchronous manner and the value may not be very precise. To see a precise
 snapshot of a moment, you can see /proc/<pid>/smaps file and scan page table.
 It's slow but very precise.
 
-Table 1-2: Contents of the status files (as of 4.1)
+Table 1-2: Contents of the status files (as of 4.8)
 ..............................................................................
  Field                       Content
  Name                        filename of the executable
+ Umask                       file mode creation mask
  State                       state (R is running, S is sleeping, D is sleeping
                              in an uninterruptible wait, Z is zombie,
                             T is traced or stopped)
@@ -226,7 +227,6 @@ Table 1-2: Contents of the status files (as of 4.1)
  TracerPid                   PID of process tracing this process (0 if not)
  Uid                         Real, effective, saved set, and  file system UIDs
  Gid                         Real, effective, saved set, and  file system GIDs
- Umask                       file mode creation mask
  FDSize                      number of file descriptor slots currently allocated
  Groups                      supplementary group list
  NStgid                      descendant namespace thread group ID hierarchy
@@ -236,6 +236,7 @@ Table 1-2: Contents of the status files (as of 4.1)
  VmPeak                      peak virtual memory size
  VmSize                      total program size
  VmLck                       locked memory size
+ VmPin                       pinned memory size
  VmHWM                       peak resident set size ("high water mark")
  VmRSS                       size of memory portions. It contains the three
                              following parts (VmRSS = RssAnon + RssFile + RssShmem)
index 2505ae67e2b632eed3c4d161d4482c2ec96959ad..53a806696c64923be8fdeb97fb6b474a696f83d2 100644 (file)
@@ -89,6 +89,10 @@ the call to devm_hwmon_device_register_with_groups or
 hwmon_device_register_with_info and if the automatic (device managed)
 removal would be too late.
 
+All supported hwmon device registration functions only accept valid device
+names. Device names including invalid characters (whitespace, '*', or '-')
+will be rejected. The 'name' parameter is mandatory.
+
 Using devm_hwmon_device_register_with_info()
 --------------------------------------------
 
index 1bb2db4406717ec02f4033553fd7df6c16d2951a..c3a1f2ea017d5b3112c99a0a3b55d38693645398 100644 (file)
@@ -6,6 +6,8 @@ Supported chips:
     Datasheet: http://www.national.com/pf/LM/LM70.html
   * Texas Instruments TMP121/TMP123
     Information: http://focus.ti.com/docs/prod/folders/print/tmp121.html
+  * Texas Instruments TMP122/TMP124
+    Information: http://www.ti.com/product/tmp122
   * National Semiconductor LM71
     Datasheet: http://www.ti.com/product/LM71
   * National Semiconductor LM74
@@ -35,8 +37,10 @@ As a real (in-tree) example of this "SPI protocol driver" interfacing
 with a "SPI master controller driver", see drivers/spi/spi_lm70llp.c
 and its associated documentation.
 
-The LM74 and TMP121/TMP123 are very similar; main difference is 13-bit
-temperature data (0.0625 degrees celsius resolution).
+The LM74 and TMP121/TMP122/TMP123/TMP124 are very similar; main difference is
+13-bit temperature data (0.0625 degrees celsius resolution).
+
+The TMP122/TMP124 also feature configurable temperature thresholds.
 
 The LM71 is also very similar; main difference is 14-bit temperature
 data (0.03125 degrees celsius resolution).
index db17fda45c3e2e8444de380b1f37d2d4f0ce6d50..47f4765db256c9e61e36aa2cffb196780995316f 100644 (file)
@@ -35,6 +35,7 @@ sysfs-Interface
 
 temp1_input - temperature input
 humidity1_input - humidity input
+eic - Electronic Identification Code
 
 Notes
 -----
@@ -45,5 +46,5 @@ humidity and 66 ms for temperature. To keep self heating below 0.1 degree
 Celsius, the device should not be active for more than 10% of the time,
 e.g. maximum two measurements per second at the given resolution.
 
-Different resolutions, the on-chip heater, using the CRC checksum and reading
-the serial number are not supported yet.
+Different resolutions, the on-chip heater, and using the CRC checksum
+are not supported yet.
index 2cc95ad466047b055d45f748619cf2d7231d5e93..fc337c317c67353a80afc3a88997e2ead759c340 100644 (file)
@@ -86,8 +86,9 @@ given driver if the chip has the feature.
 
 name           The chip name.
                This should be a short, lowercase string, not containing
-               spaces nor dashes, representing the chip name. This is
-               the only mandatory attribute.
+               whitespace, dashes, or the wildcard character '*'.
+               This attribute represents the chip name. It is the only
+               mandatory attribute.
                I2C devices get this attribute created automatically.
                RO
 
index f1f7ec9f5cc555630040640cbbc7d55b5dedaffa..836cb16d6f09fe4b70b0ad16585ddcdb6f455336 100644 (file)
@@ -65,6 +65,21 @@ LED subsystem core exposes following API for setting brightness:
                blinking, returns -EBUSY if software blink fallback is enabled.
 
 
+LED registration API
+====================
+
+A driver wanting to register a LED classdev for use by other drivers /
+userspace needs to allocate and fill a led_classdev struct and then call
+[devm_]led_classdev_register. If the non-devm version is used the driver
+must call led_classdev_unregister from its remove function before
+freeing the led_classdev struct.
+
+If the driver can detect hardware initiated brightness changes and thus
+wants to have a brightness_hw_changed attribute then the LED_BRIGHT_HW_CHANGED
+flag must be set in flags before registering. Calling
+led_classdev_notify_brightness_hw_changed on a classdev not registered with
+the LED_BRIGHT_HW_CHANGED flag is a bug and will trigger a WARN_ON.
+
 Hardware accelerated blink of LEDs
 ==================================
 
index f5967316deb9fd38462c3de20620e23bb4fb1860..7f04e13ec53d80189553fefd90dcfca135c6d205 100644 (file)
@@ -329,25 +329,6 @@ The current Livepatch implementation has several limitations:
     by "notrace".
 
 
-  + Anything inlined into __schedule() can not be patched.
-
-    The switch_to macro is inlined into __schedule(). It switches the
-    context between two processes in the middle of the macro. It does
-    not save RIP in x86_64 version (contrary to 32-bit version). Instead,
-    the currently used __schedule()/switch_to() handles both processes.
-
-    Now, let's have two different tasks. One calls the original
-    __schedule(), its registers are stored in a defined order and it
-    goes to sleep in the switch_to macro and some other task is restored
-    using the original __schedule(). Then there is the second task which
-    calls patched__schedule(), it goes to sleep there and the first task
-    is picked by the patched__schedule(). Its RSP is restored and now
-    the registers should be restored as well. But the order is different
-    in the new patched__schedule(), so...
-
-    There is work in progress to remove this limitation.
-
-
   + Livepatch modules can not be removed.
 
     The current implementation just redirects the functions at the very
index 8a112dc304c31baeec6af96bb8096c9a0cadbf9d..34c3a1b50b9aefc51fe1c4bf6811b445a6be97a8 100644 (file)
@@ -309,11 +309,15 @@ Design:
   normal mutex locks, which are far more common. As such there is only a small
   increase in code size if wait/wound mutexes are not used.
 
+  We maintain the following invariants for the wait list:
+  (1) Waiters with an acquire context are sorted by stamp order; waiters
+      without an acquire context are interspersed in FIFO order.
+  (2) Among waiters with contexts, only the first one can have other locks
+      acquired already (ctx->acquired > 0). Note that this waiter may come
+      after other waiters without contexts in the list.
+
   In general, not much contention is expected. The locks are typically used to
-  serialize access to resources for devices. The only way to make wakeups
-  smarter would be at the cost of adding a field to struct mutex_waiter. This
-  would add overhead to all cases where normal mutexes are used, and
-  ww_mutexes are generally less performance sensitive.
+  serialize access to resources for devices.
 
 Lockdep:
   Special care has been taken to warn for as many cases of api abuse
index 8267c31b317dc9ac8e86007c56de62c0f79ebcba..895d9c2d1c04302f9452350b7fffe3678af1ee61 100644 (file)
@@ -33,11 +33,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 Closes the cec device. Resources associated with the file descriptor are
 freed. The device configuration remain unchanged.
 
index 9e8dbb118d6a3d518391caa1a086f05a545b57c2..7dcfd178fb243c33f6ba23bb331236496018809c 100644 (file)
@@ -39,11 +39,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 The :c:func:`ioctl()` function manipulates cec device parameters. The
 argument ``fd`` must be an open file descriptor.
 
index af3f5b5c24c646dfe61dea9b8afed05a9cb84228..0304388cd15976a7ff00eb46ca9bc9ef5f0a3c94 100644 (file)
@@ -46,11 +46,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To open a cec device applications call :c:func:`open()` with the
 desired device name. The function has no side effects; the device
 configuration remain unchanged.
index cfb73e6027a55734747255df676619e2e55441f7..6a863cfda6e05172be7e60e930fbd0ff885b3ce4 100644 (file)
@@ -39,11 +39,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 With the :c:func:`poll()` function applications can wait for CEC
 events.
 
index 4a19ea5323a97d6ac577b62caf015e6cf2653f63..07ee2b8f89d6a320d66f1a9ed3d20977aa60c34b 100644 (file)
@@ -3,11 +3,6 @@
 Introduction
 ============
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 HDMI connectors provide a single pin for use by the Consumer Electronics
 Control protocol. This protocol allows different devices connected by an
 HDMI cable to communicate. The protocol for CEC version 1.4 is defined
@@ -31,3 +26,15 @@ control just the CEC pin.
 Drivers that support CEC will create a CEC device node (/dev/cecX) to
 give userspace access to the CEC adapter. The
 :ref:`CEC_ADAP_G_CAPS` ioctl will tell userspace what it is allowed to do.
+
+In order to check the support and test it, it is suggested to download
+the `v4l-utils <https://git.linuxtv.org/v4l-utils.git/>`_ package. It
+provides three tools to handle CEC:
+
+- cec-ctl: the Swiss army knife of CEC. Allows you to configure, transmit
+  and monitor CEC messages.
+
+- cec-compliance: does a CEC compliance test of a remote CEC device to
+  determine how compliant the CEC implementation is.
+
+- cec-follower: emulates a CEC follower.
index 2b0ddb14b280e6fddcd8aae1bc67f60560e8a9e4..a0e961f11017c74d9d8ca36fa57124910cd6dbdd 100644 (file)
@@ -29,11 +29,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 All cec devices must support :ref:`ioctl CEC_ADAP_G_CAPS <CEC_ADAP_G_CAPS>`. To query
 device information, applications call the ioctl with a pointer to a
 struct :c:type:`cec_caps`. The driver fills the structure and
index b878637e91b3d8dc78aebf0ccb94823ef6b96ac5..09f09bbe28d4ffb1d5b3291221c4b678e4e38c6e 100644 (file)
@@ -35,11 +35,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To query the current CEC logical addresses, applications call
 :ref:`ioctl CEC_ADAP_G_LOG_ADDRS <CEC_ADAP_G_LOG_ADDRS>` with a pointer to a
 struct :c:type:`cec_log_addrs` where the driver stores the logical addresses.
index 3357deb43c85afb7c577a2fe34127f210903ae73..a3cdc75cec3e3c0754027869cee43ad8ca019260 100644 (file)
@@ -35,11 +35,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To query the current physical address applications call
 :ref:`ioctl CEC_ADAP_G_PHYS_ADDR <CEC_ADAP_G_PHYS_ADDR>` with a pointer to a __u16 where the
 driver stores the physical address.
index e256c6605de7f7b7242a1ad4fbd303d5b5309369..6e589a1fae1704c0eea188c662537d37ea6142b5 100644 (file)
@@ -30,11 +30,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 CEC devices can send asynchronous events. These can be retrieved by
 calling :c:func:`CEC_DQEVENT`. If the file descriptor is in
 non-blocking mode and no event is pending, then it will return -1 and
index 4f5818b9d27724163781713a14241dca38dca0cb..e4ded9df0a84a3be52a4f2a5fb09359e1d4fe285 100644 (file)
@@ -31,11 +31,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 By default any filehandle can use :ref:`CEC_TRANSMIT`, but in order to prevent
 applications from stepping on each others toes it must be possible to
 obtain exclusive access to the CEC adapter. This ioctl sets the
index bdf015b1d1dc0a677b89cdc05b7637687370a496..dc2adb391c0a2d2183e3e00bc90af520e727c6dd 100644 (file)
@@ -34,11 +34,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To receive a CEC message the application has to fill in the
 ``timeout`` field of struct :c:type:`cec_msg` and pass it to
 :ref:`ioctl CEC_RECEIVE <CEC_RECEIVE>`.
index 44bb5a7059b38212cbecf33f530355b64459f7dd..95a23a28c59521625f0dbdc0d37eac8c84f4f92a 100644 (file)
@@ -211,7 +211,13 @@ Colorspace sRGB (V4L2_COLORSPACE_SRGB)
 The :ref:`srgb` standard defines the colorspace used by most webcams
 and computer graphics. The default transfer function is
 ``V4L2_XFER_FUNC_SRGB``. The default Y'CbCr encoding is
-``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is full range.
+``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is limited range.
+
+Note that the :ref:`sycc` standard specifies full range quantization,
+however all current capture hardware supported by the kernel convert
+R'G'B' to limited range Y'CbCr. So choosing full range as the default
+would break how applications interpret the quantization range.
+
 The chromaticities of the primary colors and the white reference are:
 
 
@@ -276,7 +282,7 @@ the following ``V4L2_YCBCR_ENC_601`` encoding as defined by :ref:`sycc`:
 
 Y' is clamped to the range [0…1] and Cb and Cr are clamped to the range
 [-0.5…0.5]. This transform is identical to one defined in SMPTE
-170M/BT.601. The Y'CbCr quantization is full range.
+170M/BT.601. The Y'CbCr quantization is limited range.
 
 
 .. _col-adobergb:
@@ -288,10 +294,15 @@ The :ref:`adobergb` standard defines the colorspace used by computer
 graphics that use the AdobeRGB colorspace. This is also known as the
 :ref:`oprgb` standard. The default transfer function is
 ``V4L2_XFER_FUNC_ADOBERGB``. The default Y'CbCr encoding is
-``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is full
-range. The chromaticities of the primary colors and the white reference
-are:
+``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is limited
+range.
+
+Note that the :ref:`oprgb` standard specifies full range quantization,
+however all current capture hardware supported by the kernel convert
+R'G'B' to limited range Y'CbCr. So choosing full range as the default
+would break how applications interpret the quantization range.
 
+The chromaticities of the primary colors and the white reference are:
 
 
 .. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
@@ -344,7 +355,7 @@ the following ``V4L2_YCBCR_ENC_601`` encoding:
 
 Y' is clamped to the range [0…1] and Cb and Cr are clamped to the range
 [-0.5…0.5]. This transform is identical to one defined in SMPTE
-170M/BT.601. The Y'CbCr quantization is full range.
+170M/BT.601. The Y'CbCr quantization is limited range.
 
 
 .. _col-bt2020:
index ba818ecce6f99508f1136a0eb0d3ee07eee74715..d2b0a8d81258b43cc1bc97350b9fa4f713101b58 100644 (file)
@@ -640,6 +640,10 @@ See also the subsection on "Cache Coherency" for a more thorough example.
 CONTROL DEPENDENCIES
 --------------------
 
+Control dependencies can be a bit tricky because current compilers do
+not understand them.  The purpose of this section is to help you prevent
+the compiler's ignorance from breaking your code.
+
 A load-load control dependency requires a full read memory barrier, not
 simply a data dependency barrier to make it work correctly.  Consider the
 following bit of code:
@@ -667,14 +671,15 @@ for load-store control dependencies, as in the following example:
 
        q = READ_ONCE(a);
        if (q) {
-               WRITE_ONCE(b, p);
+               WRITE_ONCE(b, 1);
        }
 
-Control dependencies pair normally with other types of barriers.  That
-said, please note that READ_ONCE() is not optional! Without the
-READ_ONCE(), the compiler might combine the load from 'a' with other
-loads from 'a', and the store to 'b' with other stores to 'b', with
-possible highly counterintuitive effects on ordering.
+Control dependencies pair normally with other types of barriers.
+That said, please note that neither READ_ONCE() nor WRITE_ONCE()
+are optional! Without the READ_ONCE(), the compiler might combine the
+load from 'a' with other loads from 'a'.  Without the WRITE_ONCE(),
+the compiler might combine the store to 'b' with other stores to 'b'.
+Either can result in highly counterintuitive effects on ordering.
 
 Worse yet, if the compiler is able to prove (say) that the value of
 variable 'a' is always non-zero, it would be well within its rights
@@ -682,7 +687,7 @@ to optimize the original example by eliminating the "if" statement
 as follows:
 
        q = a;
-       b = p;  /* BUG: Compiler and CPU can both reorder!!! */
+       b = 1;  /* BUG: Compiler and CPU can both reorder!!! */
 
 So don't leave out the READ_ONCE().
 
@@ -692,11 +697,11 @@ branches of the "if" statement as follows:
        q = READ_ONCE(a);
        if (q) {
                barrier();
-               WRITE_ONCE(b, p);
+               WRITE_ONCE(b, 1);
                do_something();
        } else {
                barrier();
-               WRITE_ONCE(b, p);
+               WRITE_ONCE(b, 1);
                do_something_else();
        }
 
@@ -705,12 +710,12 @@ optimization levels:
 
        q = READ_ONCE(a);
        barrier();
-       WRITE_ONCE(b, p);  /* BUG: No ordering vs. load from a!!! */
+       WRITE_ONCE(b, 1);  /* BUG: No ordering vs. load from a!!! */
        if (q) {
-               /* WRITE_ONCE(b, p); -- moved up, BUG!!! */
+               /* WRITE_ONCE(b, 1); -- moved up, BUG!!! */
                do_something();
        } else {
-               /* WRITE_ONCE(b, p); -- moved up, BUG!!! */
+               /* WRITE_ONCE(b, 1); -- moved up, BUG!!! */
                do_something_else();
        }
 
@@ -723,10 +728,10 @@ memory barriers, for example, smp_store_release():
 
        q = READ_ONCE(a);
        if (q) {
-               smp_store_release(&b, p);
+               smp_store_release(&b, 1);
                do_something();
        } else {
-               smp_store_release(&b, p);
+               smp_store_release(&b, 1);
                do_something_else();
        }
 
@@ -735,10 +740,10 @@ ordering is guaranteed only when the stores differ, for example:
 
        q = READ_ONCE(a);
        if (q) {
-               WRITE_ONCE(b, p);
+               WRITE_ONCE(b, 1);
                do_something();
        } else {
-               WRITE_ONCE(b, r);
+               WRITE_ONCE(b, 2);
                do_something_else();
        }
 
@@ -751,10 +756,10 @@ the needed conditional.  For example:
 
        q = READ_ONCE(a);
        if (q % MAX) {
-               WRITE_ONCE(b, p);
+               WRITE_ONCE(b, 1);
                do_something();
        } else {
-               WRITE_ONCE(b, r);
+               WRITE_ONCE(b, 2);
                do_something_else();
        }
 
@@ -763,7 +768,7 @@ equal to zero, in which case the compiler is within its rights to
 transform the above code into the following:
 
        q = READ_ONCE(a);
-       WRITE_ONCE(b, p);
+       WRITE_ONCE(b, 1);
        do_something_else();
 
 Given this transformation, the CPU is not required to respect the ordering
@@ -776,10 +781,10 @@ one, perhaps as follows:
        q = READ_ONCE(a);
        BUILD_BUG_ON(MAX <= 1); /* Order load from a with store to b. */
        if (q % MAX) {
-               WRITE_ONCE(b, p);
+               WRITE_ONCE(b, 1);
                do_something();
        } else {
-               WRITE_ONCE(b, r);
+               WRITE_ONCE(b, 2);
                do_something_else();
        }
 
@@ -812,30 +817,28 @@ not necessarily apply to code following the if-statement:
 
        q = READ_ONCE(a);
        if (q) {
-               WRITE_ONCE(b, p);
+               WRITE_ONCE(b, 1);
        } else {
-               WRITE_ONCE(b, r);
+               WRITE_ONCE(b, 2);
        }
-       WRITE_ONCE(c, 1);  /* BUG: No ordering against the read from "a". */
+       WRITE_ONCE(c, 1);  /* BUG: No ordering against the read from 'a'. */
 
 It is tempting to argue that there in fact is ordering because the
 compiler cannot reorder volatile accesses and also cannot reorder
-the writes to "b" with the condition.  Unfortunately for this line
-of reasoning, the compiler might compile the two writes to "b" as
+the writes to 'b' with the condition.  Unfortunately for this line
+of reasoning, the compiler might compile the two writes to 'b' as
 conditional-move instructions, as in this fanciful pseudo-assembly
 language:
 
        ld r1,a
-       ld r2,p
-       ld r3,r
        cmp r1,$0
-       cmov,ne r4,r2
-       cmov,eq r4,r3
+       cmov,ne r4,$1
+       cmov,eq r4,$2
        st r4,b
        st $1,c
 
 A weakly ordered CPU would have no dependency of any sort between the load
-from "a" and the store to "c".  The control dependencies would extend
+from 'a' and the store to 'c'.  The control dependencies would extend
 only to the pair of cmov instructions and the store depending on them.
 In short, control dependencies apply only to the stores in the then-clause
 and else-clause of the if-statement in question (including functions
@@ -843,7 +846,7 @@ invoked by those two clauses), not to code following that if-statement.
 
 Finally, control dependencies do -not- provide transitivity.  This is
 demonstrated by two related examples, with the initial values of
-x and y both being zero:
+'x' and 'y' both being zero:
 
        CPU 0                     CPU 1
        =======================   =======================
@@ -915,6 +918,9 @@ In summary:
   (*) Control dependencies do -not- provide transitivity.  If you
       need transitivity, use smp_mb().
 
+  (*) Compilers do not understand control dependencies.  It is therefore
+      your job to ensure that they do not break your code.
+
 
 SMP BARRIER PAIRING
 -------------------
diff --git a/Documentation/mtd/intel-spi.txt b/Documentation/mtd/intel-spi.txt
new file mode 100644 (file)
index 0000000..bc35772
--- /dev/null
@@ -0,0 +1,88 @@
+Upgrading BIOS using intel-spi
+------------------------------
+
+Many Intel CPUs like Baytrail and Braswell include SPI serial flash host
+controller which is used to hold BIOS and other platform specific data.
+Since contents of the SPI serial flash is crucial for machine to function,
+it is typically protected by different hardware protection mechanisms to
+avoid accidental (or on purpose) overwrite of the content.
+
+Not all manufacturers protect the SPI serial flash, mainly because it
+allows upgrading the BIOS image directly from an OS.
+
+The intel-spi driver makes it possible to read and write the SPI serial
+flash, if certain protection bits are not set and locked. If it finds
+any of them set, the whole MTD device is made read-only to prevent
+partial overwrites. By default the driver exposes SPI serial flash
+contents as read-only but it can be changed from kernel command line,
+passing "intel-spi.writeable=1".
+
+Please keep in mind that overwriting the BIOS image on SPI serial flash
+might render the machine unbootable and requires special equipment like
+Dediprog to revive. You have been warned!
+
+Below are the steps how to upgrade MinnowBoard MAX BIOS directly from
+Linux.
+
+ 1) Download and extract the latest Minnowboard MAX BIOS SPI image
+    [1]. At the time writing this the latest image is v92.
+
+ 2) Install mtd-utils package [2]. We need this in order to erase the SPI
+    serial flash. Distros like Debian and Fedora have this prepackaged with
+    name "mtd-utils".
+
+ 3) Add "intel-spi.writeable=1" to the kernel command line and reboot
+    the board (you can also reload the driver passing "writeable=1" as
+    module parameter to modprobe).
+
+ 4) Once the board is up and running again, find the right MTD partition
+    (it is named as "BIOS"):
+
+    # cat /proc/mtd
+    dev:    size   erasesize  name
+    mtd0: 00800000 00001000 "BIOS"
+
+    So here it will be /dev/mtd0 but it may vary.
+
+ 5) Make backup of the existing image first:
+
+    # dd if=/dev/mtd0ro of=bios.bak
+    16384+0 records in
+    16384+0 records out
+    8388608 bytes (8.4 MB) copied, 10.0269 s, 837 kB/s
+
+ 6) Verify the backup
+
+    # sha1sum /dev/mtd0ro bios.bak
+    fdbb011920572ca6c991377c4b418a0502668b73  /dev/mtd0ro
+    fdbb011920572ca6c991377c4b418a0502668b73  bios.bak
+
+    The SHA1 sums must match. Otherwise do not continue any further!
+
+ 7) Erase the SPI serial flash. After this step, do not reboot the
+    board! Otherwise it will not start anymore.
+
+    # flash_erase /dev/mtd0 0 0
+    Erasing 4 Kibyte @ 7ff000 -- 100 % complete
+
+ 8) Once completed without errors you can write the new BIOS image:
+
+    # dd if=MNW2MAX1.X64.0092.R01.1605221712.bin of=/dev/mtd0
+
+ 9) Verify that the new content of the SPI serial flash matches the new
+    BIOS image:
+
+    # sha1sum /dev/mtd0ro MNW2MAX1.X64.0092.R01.1605221712.bin
+    9b4df9e4be2057fceec3a5529ec3d950836c87a2  /dev/mtd0ro
+    9b4df9e4be2057fceec3a5529ec3d950836c87a2 MNW2MAX1.X64.0092.R01.1605221712.bin
+
+    The SHA1 sums should match.
+
+ 10) Now you can reboot your board and observe the new BIOS starting up
+     properly.
+
+References
+----------
+
+[1] https://firmware.intel.com/sites/default/files/MinnowBoard.MAX_.X64.92.R01.zip
+[2] http://www.linux-mtd.infradead.org/
index 129f7c0e14839837e1ff4c1adc53544548a7d9a9..21d2d48f87a254a2c4281bae68646f29c393fb14 100644 (file)
@@ -163,8 +163,7 @@ of flags and remove sysfs attributes pm_qos_no_power_off and pm_qos_remote_wakeu
 under the device's power directory.
 
 Notification mechanisms:
-The per-device PM QoS framework has 2 different and distinct notification trees:
-a per-device notification tree and a global notification tree.
+The per-device PM QoS framework has a per-device notification tree.
 
 int dev_pm_qos_add_notifier(device, notifier):
 Adds a notification callback function for the device.
@@ -174,16 +173,6 @@ is changed (for resume latency device PM QoS only).
 int dev_pm_qos_remove_notifier(device, notifier):
 Removes the notification callback function for the device.
 
-int dev_pm_qos_add_global_notifier(notifier):
-Adds a notification callback function in the global notification tree of the
-framework.
-The callback is called when the aggregated value for any device is changed
-(for resume latency device PM QoS only).
-
-int dev_pm_qos_remove_global_notifier(notifier):
-Removes the notification callback function from the global notification tree
-of the framework.
-
 
 Active state latency tolerance
 
index 4870980e967e01f720236e7a3d6749ca11537c19..64546eb9a16a118771fb8d1d822ee8fdeafd46de 100644 (file)
@@ -100,7 +100,7 @@ knows what to do to handle the device).
   * If the suspend callback returns an error code different from -EBUSY and
     -EAGAIN, the PM core regards this as a fatal error and will refuse to run
     the helper functions described in Section 4 for the device until its status
-    is directly set to  either'active', or 'suspended' (the PM core provides
+    is directly set to  either 'active', or 'suspended' (the PM core provides
     special helper functions for this purpose).
 
 In particular, if the driver requires remote wakeup capability (i.e. hardware
@@ -217,7 +217,7 @@ defined in include/linux/pm.h:
       one to complete
 
   spinlock_t lock;
-    - lock used for synchronisation
+    - lock used for synchronization
 
   atomic_t usage_count;
     - the usage counter of the device
@@ -565,7 +565,7 @@ appropriate to ensure that the device is not put back to sleep during the
 probe. This can happen with systems such as the network device layer.
 
 It may be desirable to suspend the device once ->probe() has finished.
-Therefore the driver core uses the asyncronous pm_request_idle() to submit a
+Therefore the driver core uses the asynchronous pm_request_idle() to submit a
 request to execute the subsystem-level idle callback for the device at that
 time.  A driver that makes use of the runtime autosuspend feature, may want to
 update the last busy mark before returning from ->probe().
index 8a39ce45d8a01c298ec3b62b878e2623cba5a958..bc4548245a2431349b3d24655ab3b73794b7a10a 100644 (file)
@@ -25,7 +25,7 @@ to be used subsequently to change to the one represented by that string.
 Consequently, there are two ways to cause the system to go into the
 Suspend-To-Idle sleep state.  The first one is to write "freeze" directly to
 /sys/power/state.  The second one is to write "s2idle" to /sys/power/mem_sleep
-and then to wrtie "mem" to /sys/power/state.  Similarly, there are two ways
+and then to write "mem" to /sys/power/state.  Similarly, there are two ways
 to cause the system to go into the Power-On Suspend sleep state (the strings to
 write to the control files in that case are "standby" or "shallow" and "mem",
 respectively) if that state is supported by the platform.  In turn, there is
@@ -35,9 +35,7 @@ only one way to cause the system to go into the Suspend-To-RAM state (write
 The default suspend mode (ie. the one to be used without writing anything into
 /sys/power/mem_sleep) is either "deep" (if Suspend-To-RAM is supported) or
 "s2idle", but it can be overridden by the value of the "mem_sleep_default"
-parameter in the kernel command line.  On some ACPI-based systems, depending on
-the information in the FADT, the default may be "s2idle" even if Suspend-To-RAM
-is supported.
+parameter in the kernel command line.
 
 The properties of all of the sleep states are described below.
 
index 8e37b0ba2c9dd2f6de7e77e31fde58750693f748..cbc1b46cbf70b41602a3c7423f4f1f9833e68049 100644 (file)
@@ -408,6 +408,11 @@ CONTENTS
   * the new scheduling related syscalls that manipulate it, i.e.,
     sched_setattr() and sched_getattr() are implemented.
 
+ For debugging purposes, the leftover runtime and absolute deadline of a
+ SCHED_DEADLINE task can be retrieved through /proc/<pid>/sched (entries
+ dl.runtime and dl.deadline, both values in ns). A programmatic way to
+ retrieve these values from production code is under discussion.
+
 
 4.3 Default behavior
 ---------------------
@@ -476,6 +481,7 @@ CONTENTS
 
  Still missing:
 
+  - programmatic way to retrieve current runtime and absolute deadline
   - refinements to deadline inheritance, especially regarding the possibility
     of retaining bandwidth isolation among non-interacting tasks. This is
     being studied from both theoretical and practical points of view, and
index a03f0d944fe6dad87415588c3554bff04697a745..d8fce3e784574ee4d1653fd419a01220f15608bb 100644 (file)
@@ -158,11 +158,11 @@ as its prone to starvation without deadline scheduling.
 Consider two sibling groups A and B; both have 50% bandwidth, but A's
 period is twice the length of B's.
 
-* group A: period=100000us, runtime=10000us
-       - this runs for 0.01s once every 0.1s
+* group A: period=100000us, runtime=50000us
+       - this runs for 0.05s once every 0.1s
 
-* group B: period= 50000us, runtime=10000us
-       - this runs for 0.01s twice every 0.1s (or once every 0.05 sec).
+* group B: period= 50000us, runtime=25000us
+       - this runs for 0.025s twice every 0.1s (or once every 0.05 sec).
 
 This means that currently a while (1) loop in A will run for the full period of
 B and can starve B's tasks (assuming they are of lower priority) for a whole
diff --git a/Documentation/spi/ep93xx_spi b/Documentation/spi/ep93xx_spi
deleted file mode 100644 (file)
index 832ddce..0000000
+++ /dev/null
@@ -1,105 +0,0 @@
-Cirrus EP93xx SPI controller driver HOWTO
-=========================================
-
-ep93xx_spi driver brings SPI master support for EP93xx SPI controller.  Chip
-selects are implemented with GPIO lines.
-
-NOTE: If possible, don't use SFRMOUT (SFRM1) signal as a chip select. It will
-not work correctly (it cannot be controlled by software). Use GPIO lines
-instead.
-
-Sample configuration
-====================
-
-Typically driver configuration is done in platform board files (the files under
-arch/arm/mach-ep93xx/*.c). In this example we configure MMC over SPI through
-this driver on TS-7260 board. You can adapt the code to suit your needs.
-
-This example uses EGPIO9 as SD/MMC card chip select (this is wired in DIO1
-header on the board).
-
-You need to select CONFIG_MMC_SPI to use mmc_spi driver.
-
-arch/arm/mach-ep93xx/ts72xx.c:
-
-...
-#include <linux/gpio.h>
-#include <linux/spi/spi.h>
-
-#include <linux/platform_data/spi-ep93xx.h>
-
-/* this is our GPIO line used for chip select */
-#define MMC_CHIP_SELECT_GPIO EP93XX_GPIO_LINE_EGPIO9
-
-static int ts72xx_mmc_spi_setup(struct spi_device *spi)
-{
-       int err;
-
-       err = gpio_request(MMC_CHIP_SELECT_GPIO, spi->modalias);
-       if (err)
-               return err;
-
-       gpio_direction_output(MMC_CHIP_SELECT_GPIO, 1);
-
-       return 0;
-}
-
-static void ts72xx_mmc_spi_cleanup(struct spi_device *spi)
-{
-       gpio_set_value(MMC_CHIP_SELECT_GPIO, 1);
-       gpio_direction_input(MMC_CHIP_SELECT_GPIO);
-       gpio_free(MMC_CHIP_SELECT_GPIO);
-}
-
-static void ts72xx_mmc_spi_cs_control(struct spi_device *spi, int value)
-{
-       gpio_set_value(MMC_CHIP_SELECT_GPIO, value);
-}
-
-static struct ep93xx_spi_chip_ops ts72xx_mmc_spi_ops = {
-       .setup          = ts72xx_mmc_spi_setup,
-       .cleanup        = ts72xx_mmc_spi_cleanup,
-       .cs_control     = ts72xx_mmc_spi_cs_control,
-};
-
-static struct spi_board_info ts72xx_spi_devices[] __initdata = {
-       {
-               .modalias               = "mmc_spi",
-               .controller_data        = &ts72xx_mmc_spi_ops,
-               /*
-                * We use 10 MHz even though the maximum is 7.4 MHz. The driver
-                * will limit it automatically to max. frequency.
-                */
-               .max_speed_hz           = 10 * 1000 * 1000,
-               .bus_num                = 0,
-               .chip_select            = 0,
-               .mode                   = SPI_MODE_0,
-       },
-};
-
-static struct ep93xx_spi_info ts72xx_spi_info = {
-       .num_chipselect = ARRAY_SIZE(ts72xx_spi_devices),
-};
-
-static void __init ts72xx_init_machine(void)
-{
-       ...
-       ep93xx_register_spi(&ts72xx_spi_info, ts72xx_spi_devices,
-                           ARRAY_SIZE(ts72xx_spi_devices));
-}
-
-The driver can use DMA for the transfers also. In this case ts72xx_spi_info
-becomes:
-
-static struct ep93xx_spi_info ts72xx_spi_info = {
-       .num_chipselect = ARRAY_SIZE(ts72xx_spi_devices),
-       .use_dma        = true;
-};
-
-Note that CONFIG_EP93XX_DMA should be enabled as well.
-
-Thanks to
-=========
-Martin Guy, H. Hartley Sweeten and others who helped me during development of
-the driver. Simplemachines.it donated me a Sim.One board which I used testing
-the driver on EP9307.
diff --git a/Documentation/timers/timer_stats.txt b/Documentation/timers/timer_stats.txt
deleted file mode 100644 (file)
index de835ee..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-timer_stats - timer usage statistics
-------------------------------------
-
-timer_stats is a debugging facility to make the timer (ab)usage in a Linux
-system visible to kernel and userspace developers. If enabled in the config
-but not used it has almost zero runtime overhead, and a relatively small
-data structure overhead. Even if collection is enabled runtime all the
-locking is per-CPU and lookup is hashed.
-
-timer_stats should be used by kernel and userspace developers to verify that
-their code does not make unduly use of timers. This helps to avoid unnecessary
-wakeups, which should be avoided to optimize power consumption.
-
-It can be enabled by CONFIG_TIMER_STATS in the "Kernel hacking" configuration
-section.
-
-timer_stats collects information about the timer events which are fired in a
-Linux system over a sample period:
-
-- the pid of the task(process) which initialized the timer
-- the name of the process which initialized the timer
-- the function where the timer was initialized
-- the callback function which is associated to the timer
-- the number of events (callbacks)
-
-timer_stats adds an entry to /proc: /proc/timer_stats
-
-This entry is used to control the statistics functionality and to read out the
-sampled information.
-
-The timer_stats functionality is inactive on bootup.
-
-To activate a sample period issue:
-# echo 1 >/proc/timer_stats
-
-To stop a sample period issue:
-# echo 0 >/proc/timer_stats
-
-The statistics can be retrieved by:
-# cat /proc/timer_stats
-
-While sampling is enabled, each readout from /proc/timer_stats will see
-newly updated statistics. Once sampling is disabled, the sampled information
-is kept until a new sample period is started. This allows multiple readouts.
-
-Sample output of /proc/timer_stats:
-
-Timerstats sample period: 3.888770 s
-  12,     0 swapper          hrtimer_stop_sched_tick (hrtimer_sched_tick)
-  15,     1 swapper          hcd_submit_urb (rh_timer_func)
-   4,   959 kedac            schedule_timeout (process_timeout)
-   1,     0 swapper          page_writeback_init (wb_timer_fn)
-  28,     0 swapper          hrtimer_stop_sched_tick (hrtimer_sched_tick)
-  22,  2948 IRQ 4            tty_flip_buffer_push (delayed_work_timer_fn)
-   3,  3100 bash             schedule_timeout (process_timeout)
-   1,     1 swapper          queue_delayed_work_on (delayed_work_timer_fn)
-   1,     1 swapper          queue_delayed_work_on (delayed_work_timer_fn)
-   1,     1 swapper          neigh_table_init_no_netlink (neigh_periodic_timer)
-   1,  2292 ip               __netdev_watchdog_up (dev_watchdog)
-   1,    23 events/1         do_cache_clean (delayed_work_timer_fn)
-90 total events, 30.0 events/sec
-
-The first column is the number of events, the second column the pid, the third
-column is the name of the process. The forth column shows the function which
-initialized the timer and in parenthesis the callback function which was
-executed on expiry.
-
-    Thomas, Ingo
-
-Added flag to indicate 'deferrable timer' in /proc/timer_stats. A deferrable
-timer will appear as follows
-  10D,     1 swapper          queue_delayed_work_on (delayed_work_timer_fn)
-
index 95a4d34af3fdd799d28046e0b992dcbc049817ce..b8527c6b76461c7ee9446630c5acb4cc65867671 100644 (file)
@@ -31,6 +31,8 @@ Offset        Proto   Name            Meaning
 1E9/001        ALL     eddbuf_entries  Number of entries in eddbuf (below)
 1EA/001        ALL     edd_mbr_sig_buf_entries Number of entries in edd_mbr_sig_buffer
                                (below)
+1EB/001        ALL     kbd_status      Numlock is enabled
+1EC/001        ALL     secure_boot     Secure boot is enabled in the firmware
 1EF/001        ALL     sentinel        Used to detect broken bootloaders
 290/040        ALL     edd_mbr_sig_buffer EDD MBR signatures
 2D0/A00        ALL     e820_map        E820 memory map table
index 26edd832c64ed14d0939897194ca1d9e103c76e5..baac725ce1ff2628a5d0e40c1edefb994a2dada4 100644 (file)
@@ -643,7 +643,7 @@ S:  Maintained
 F:     drivers/gpio/gpio-altera.c
 
 ALTERA SYSTEM RESOURCE DRIVER FOR ARRIA10 DEVKIT
-M:     Thor Thayer <tthayer@opensource.altera.com>
+M:     Thor Thayer <thor.thayer@linux.intel.com>
 S:     Maintained
 F:     drivers/gpio/gpio-altera-a10sr.c
 F:     drivers/mfd/altera-a10sr.c
@@ -877,8 +877,8 @@ S:  Odd fixes
 F:     drivers/hwmon/applesmc.c
 
 APPLETALK NETWORK LAYER
-M:     Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
-S:     Maintained
+L:     netdev@vger.kernel.org
+S:     Odd fixes
 F:     drivers/net/appletalk/
 F:     net/appletalk/
 
@@ -1091,7 +1091,7 @@ F:        arch/arm/boot/dts/aspeed-*
 F:     drivers/*/*aspeed*
 
 ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT
-M:     Nicolas Ferre <nicolas.ferre@atmel.com>
+M:     Nicolas Ferre <nicolas.ferre@microchip.com>
 M:     Alexandre Belloni <alexandre.belloni@free-electrons.com>
 M:     Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1773,7 +1773,7 @@ F:        drivers/soc/renesas/
 F:     include/linux/soc/renesas/
 
 ARM/SOCFPGA ARCHITECTURE
-M:     Dinh Nguyen <dinguyen@opensource.altera.com>
+M:     Dinh Nguyen <dinguyen@kernel.org>
 S:     Maintained
 F:     arch/arm/mach-socfpga/
 F:     arch/arm/boot/dts/socfpga*
@@ -1783,12 +1783,12 @@ W:      http://www.rocketboards.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git
 
 ARM/SOCFPGA CLOCK FRAMEWORK SUPPORT
-M:     Dinh Nguyen <dinguyen@opensource.altera.com>
+M:     Dinh Nguyen <dinguyen@kernel.org>
 S:     Maintained
 F:     drivers/clk/socfpga/
 
 ARM/SOCFPGA EDAC SUPPORT
-M:     Thor Thayer <tthayer@opensource.altera.com>
+M:     Thor Thayer <thor.thayer@linux.intel.com>
 S:     Maintained
 F:     drivers/edac/altera_edac.
 
@@ -2175,56 +2175,56 @@ F:      include/linux/atm*
 F:     include/uapi/linux/atm*
 
 ATMEL AT91 / AT32 MCI DRIVER
-M:     Ludovic Desroches <ludovic.desroches@atmel.com>
+M:     Ludovic Desroches <ludovic.desroches@microchip.com>
 S:     Maintained
 F:     drivers/mmc/host/atmel-mci.c
 
 ATMEL AT91 SAMA5D2-Compatible Shutdown Controller
-M:     Nicolas Ferre <nicolas.ferre@atmel.com>
+M:     Nicolas Ferre <nicolas.ferre@microchip.com>
 S:     Supported
 F:     drivers/power/reset/at91-sama5d2_shdwc.c
 
 ATMEL SAMA5D2 ADC DRIVER
-M:     Ludovic Desroches <ludovic.desroches@atmel.com>
+M:     Ludovic Desroches <ludovic.desroches@microchip.com>
 L:     linux-iio@vger.kernel.org
 S:     Supported
 F:     drivers/iio/adc/at91-sama5d2_adc.c
 
 ATMEL Audio ALSA driver
-M:     Nicolas Ferre <nicolas.ferre@atmel.com>
+M:     Nicolas Ferre <nicolas.ferre@microchip.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Supported
 F:     sound/soc/atmel
 
 ATMEL XDMA DRIVER
-M:     Ludovic Desroches <ludovic.desroches@atmel.com>
+M:     Ludovic Desroches <ludovic.desroches@microchip.com>
 L:     linux-arm-kernel@lists.infradead.org
 L:     dmaengine@vger.kernel.org
 S:     Supported
 F:     drivers/dma/at_xdmac.c
 
 ATMEL I2C DRIVER
-M:     Ludovic Desroches <ludovic.desroches@atmel.com>
+M:     Ludovic Desroches <ludovic.desroches@microchip.com>
 L:     linux-i2c@vger.kernel.org
 S:     Supported
 F:     drivers/i2c/busses/i2c-at91.c
 
 ATMEL ISI DRIVER
-M:     Ludovic Desroches <ludovic.desroches@atmel.com>
+M:     Ludovic Desroches <ludovic.desroches@microchip.com>
 L:     linux-media@vger.kernel.org
 S:     Supported
 F:     drivers/media/platform/soc_camera/atmel-isi.c
 F:     include/media/atmel-isi.h
 
 ATMEL LCDFB DRIVER
-M:     Nicolas Ferre <nicolas.ferre@atmel.com>
+M:     Nicolas Ferre <nicolas.ferre@microchip.com>
 L:     linux-fbdev@vger.kernel.org
 S:     Maintained
 F:     drivers/video/fbdev/atmel_lcdfb.c
 F:     include/video/atmel_lcdc.h
 
 ATMEL MACB ETHERNET DRIVER
-M:     Nicolas Ferre <nicolas.ferre@atmel.com>
+M:     Nicolas Ferre <nicolas.ferre@microchip.com>
 S:     Supported
 F:     drivers/net/ethernet/cadence/
 
@@ -2236,32 +2236,32 @@ S:      Supported
 F:     drivers/mtd/nand/atmel_nand*
 
 ATMEL SDMMC DRIVER
-M:     Ludovic Desroches <ludovic.desroches@atmel.com>
+M:     Ludovic Desroches <ludovic.desroches@microchip.com>
 L:     linux-mmc@vger.kernel.org
 S:     Supported
 F:     drivers/mmc/host/sdhci-of-at91.c
 
 ATMEL SPI DRIVER
-M:     Nicolas Ferre <nicolas.ferre@atmel.com>
+M:     Nicolas Ferre <nicolas.ferre@microchip.com>
 S:     Supported
 F:     drivers/spi/spi-atmel.*
 
 ATMEL SSC DRIVER
-M:     Nicolas Ferre <nicolas.ferre@atmel.com>
+M:     Nicolas Ferre <nicolas.ferre@microchip.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Supported
 F:     drivers/misc/atmel-ssc.c
 F:     include/linux/atmel-ssc.h
 
 ATMEL Timer Counter (TC) AND CLOCKSOURCE DRIVERS
-M:     Nicolas Ferre <nicolas.ferre@atmel.com>
+M:     Nicolas Ferre <nicolas.ferre@microchip.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Supported
 F:     drivers/misc/atmel_tclib.c
 F:     drivers/clocksource/tcb_clksrc.c
 
 ATMEL USBA UDC DRIVER
-M:     Nicolas Ferre <nicolas.ferre@atmel.com>
+M:     Nicolas Ferre <nicolas.ferre@microchip.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Supported
 F:     drivers/usb/gadget/udc/atmel_usba_udc.*
@@ -2692,6 +2692,13 @@ F:       drivers/irqchip/irq-brcmstb*
 F:     include/linux/bcm963xx_nvram.h
 F:     include/linux/bcm963xx_tag.h
 
+BROADCOM BMIPS CPUFREQ DRIVER
+M:     Markus Mayer <mmayer@broadcom.com>
+M:     bcm-kernel-feedback-list@broadcom.com
+L:     linux-pm@vger.kernel.org
+S:     Maintained
+F:     drivers/cpufreq/bmips-cpufreq.c
+
 BROADCOM TG3 GIGABIT ETHERNET DRIVER
 M:     Siva Reddy Kallam <siva.kallam@broadcom.com>
 M:     Prashant Sreedharan <prashant@broadcom.com>
@@ -3567,7 +3574,7 @@ F:        drivers/infiniband/hw/cxgb3/
 F:     include/uapi/rdma/cxgb3-abi.h
 
 CXGB4 ETHERNET DRIVER (CXGB4)
-M:     Hariprasad S <hariprasad@chelsio.com>
+M:     Ganesh Goudar <ganeshgr@chelsio.com>
 L:     netdev@vger.kernel.org
 W:     http://www.chelsio.com
 S:     Supported
@@ -4100,12 +4107,18 @@ F:      drivers/gpu/drm/bridge/
 
 DRM DRIVER FOR BOCHS VIRTUAL GPU
 M:     Gerd Hoffmann <kraxel@redhat.com>
-S:     Odd Fixes
+L:     virtualization@lists.linux-foundation.org
+T:     git git://git.kraxel.org/linux drm-qemu
+S:     Maintained
 F:     drivers/gpu/drm/bochs/
 
 DRM DRIVER FOR QEMU'S CIRRUS DEVICE
 M:     Dave Airlie <airlied@redhat.com>
-S:     Odd Fixes
+M:     Gerd Hoffmann <kraxel@redhat.com>
+L:     virtualization@lists.linux-foundation.org
+T:     git git://git.kraxel.org/linux drm-qemu
+S:     Obsolete
+W:     https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
 F:     drivers/gpu/drm/cirrus/
 
 RADEON and AMDGPU DRM DRIVERS
@@ -4147,7 +4160,7 @@ F:        Documentation/gpu/i915.rst
 INTEL GVT-g DRIVERS (Intel GPU Virtualization)
 M:      Zhenyu Wang <zhenyuw@linux.intel.com>
 M:      Zhi Wang <zhi.a.wang@intel.com>
-L:      igvt-g-dev@lists.01.org
+L:      intel-gvt-dev@lists.freedesktop.org
 L:      intel-gfx@lists.freedesktop.org
 W:      https://01.org/igvt-g
 T:      git https://github.com/01org/gvt-linux.git
@@ -4298,7 +4311,10 @@ F:       Documentation/devicetree/bindings/display/renesas,du.txt
 
 DRM DRIVER FOR QXL VIRTUAL GPU
 M:     Dave Airlie <airlied@redhat.com>
-S:     Odd Fixes
+M:     Gerd Hoffmann <kraxel@redhat.com>
+L:     virtualization@lists.linux-foundation.org
+T:     git git://git.kraxel.org/linux drm-qemu
+S:     Maintained
 F:     drivers/gpu/drm/qxl/
 F:     include/uapi/drm/qxl_drm.h
 
@@ -6718,9 +6734,8 @@ S:        Odd Fixes
 F:     drivers/tty/ipwireless/
 
 IPX NETWORK LAYER
-M:     Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 L:     netdev@vger.kernel.org
-S:     Maintained
+S:     Odd fixes
 F:     include/net/ipx.h
 F:     include/uapi/linux/ipx.h
 F:     net/ipx/
@@ -7492,8 +7507,8 @@ S:        Maintained
 F:     drivers/misc/lkdtm*
 
 LLC (802.2)
-M:     Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
-S:     Maintained
+L:     netdev@vger.kernel.org
+S:     Odd fixes
 F:     include/linux/llc.h
 F:     include/uapi/linux/llc.h
 F:     include/net/llc*
@@ -7526,6 +7541,7 @@ S:        Maintained
 F:     Documentation/hwmon/lm90
 F:     Documentation/devicetree/bindings/hwmon/lm90.txt
 F:     drivers/hwmon/lm90.c
+F:     include/dt-bindings/thermal/lm90.h
 
 LM95234 HARDWARE MONITOR DRIVER
 M:     Guenter Roeck <linux@roeck-us.net>
@@ -9727,7 +9743,7 @@ S:        Maintained
 F:     drivers/pinctrl/pinctrl-at91.*
 
 PIN CONTROLLER - ATMEL AT91 PIO4
-M:     Ludovic Desroches <ludovic.desroches@atmel.com>
+M:     Ludovic Desroches <ludovic.desroches@microchip.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-gpio@vger.kernel.org
 S:     Supported
@@ -10186,7 +10202,6 @@ F:      drivers/media/tuners/qt1010*
 QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
 M:     QCA ath9k Development <ath9k-devel@qca.qualcomm.com>
 L:     linux-wireless@vger.kernel.org
-L:     ath9k-devel@lists.ath9k.org
 W:     http://wireless.kernel.org/en/users/Drivers/ath9k
 S:     Supported
 F:     drivers/net/wireless/ath/ath9k/
@@ -13057,7 +13072,7 @@ F:      drivers/input/serio/userio.c
 F:     include/uapi/linux/userio.h
 
 VIRTIO CONSOLE DRIVER
-M:     Amit Shah <amit.shah@redhat.com>
+M:     Amit Shah <amit@kernel.org>
 L:     virtualization@lists.linux-foundation.org
 S:     Maintained
 F:     drivers/char/virtio_console.c
@@ -13092,6 +13107,7 @@ M:      David Airlie <airlied@linux.ie>
 M:     Gerd Hoffmann <kraxel@redhat.com>
 L:     dri-devel@lists.freedesktop.org
 L:     virtualization@lists.linux-foundation.org
+T:     git git://git.kraxel.org/linux drm-qemu
 S:     Maintained
 F:     drivers/gpu/drm/virtio/
 F:     include/uapi/linux/virtio_gpu.h
@@ -13364,10 +13380,8 @@ S:     Maintained
 F:     drivers/input/misc/wistron_btns.c
 
 WL3501 WIRELESS PCMCIA CARD DRIVER
-M:     Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 L:     linux-wireless@vger.kernel.org
-W:     http://oops.ghostprotocols.net:81/blog
-S:     Maintained
+S:     Odd fixes
 F:     drivers/net/wireless/wl3501*
 
 WOLFSON MICROELECTRONICS DRIVERS
@@ -13443,6 +13457,7 @@ F:      arch/x86/
 
 X86 PLATFORM DRIVERS
 M:     Darren Hart <dvhart@infradead.org>
+M:     Andy Shevchenko <andy@infradead.org>
 L:     platform-driver-x86@vger.kernel.org
 T:     git git://git.infradead.org/users/dvhart/linux-platform-drivers-x86.git
 S:     Maintained
@@ -13614,6 +13629,7 @@ F:      drivers/net/hamradio/z8530.h
 
 ZBUD COMPRESSED PAGE ALLOCATOR
 M:     Seth Jennings <sjenning@redhat.com>
+M:     Dan Streetman <ddstreet@ieee.org>
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     mm/zbud.c
@@ -13669,6 +13685,7 @@ F:      Documentation/vm/zsmalloc.txt
 
 ZSWAP COMPRESSED SWAP CACHING
 M:     Seth Jennings <sjenning@redhat.com>
+M:     Dan Streetman <ddstreet@ieee.org>
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     mm/zswap.c
index 098840012b9bb4604d82c5269e53274170ae656f..4e2abc36e14b79faaf1063c674b6f7dd85c445ea 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 VERSION = 4
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
-NAME = Anniversary Edition
+EXTRAVERSION =
+NAME = Fearless Coyote
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
@@ -87,10 +87,12 @@ endif
 ifneq ($(filter 4.%,$(MAKE_VERSION)),) # make-4
 ifneq ($(filter %s ,$(firstword x$(MAKEFLAGS))),)
   quiet=silent_
+  tools_silent=s
 endif
 else                                   # make-3.8x
 ifneq ($(filter s% -s%,$(MAKEFLAGS)),)
   quiet=silent_
+  tools_silent=-s
 endif
 endif
 
@@ -797,7 +799,7 @@ KBUILD_CFLAGS   += $(call cc-option,-Werror=incompatible-pointer-types)
 KBUILD_ARFLAGS := $(call ar-option,D)
 
 # check for 'asm goto'
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
        KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
        KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
 endif
@@ -1607,11 +1609,11 @@ image_name:
 # Clear a bunch of variables before executing the submake
 tools/: FORCE
        $(Q)mkdir -p $(objtree)/tools
-       $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(shell cd $(objtree) && /bin/pwd) subdir=tools -C $(src)/tools/
+       $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(shell cd $(objtree) && /bin/pwd) subdir=tools -C $(src)/tools/
 
 tools/%: FORCE
        $(Q)mkdir -p $(objtree)/tools
-       $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(shell cd $(objtree) && /bin/pwd) subdir=tools -C $(src)/tools/ $*
+       $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(shell cd $(objtree) && /bin/pwd) subdir=tools -C $(src)/tools/ $*
 
 # Single targets
 # ---------------------------------------------------------------------------
index bf8475ce85ee2ab37a5e69faf86f7acd22ac59dc..baa152b9348e7dd230472c0218817bb97e89255e 100644 (file)
@@ -1,7 +1,6 @@
 
 
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += exec.h
 generic-y += export.h
 generic-y += irq_work.h
index 54d8616644e2dfd510846b30b52e2b7e21c1c888..9d27a7d333dca277a40d22e63a382cb78edf6a7d 100644 (file)
@@ -1145,7 +1145,7 @@ struct rusage32 {
 SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
 {
        struct rusage32 r;
-       cputime_t utime, stime;
+       u64 utime, stime;
        unsigned long utime_jiffies, stime_jiffies;
 
        if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
@@ -1155,16 +1155,16 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
        switch (who) {
        case RUSAGE_SELF:
                task_cputime(current, &utime, &stime);
-               utime_jiffies = cputime_to_jiffies(utime);
-               stime_jiffies = cputime_to_jiffies(stime);
+               utime_jiffies = nsecs_to_jiffies(utime);
+               stime_jiffies = nsecs_to_jiffies(stime);
                jiffies_to_timeval32(utime_jiffies, &r.ru_utime);
                jiffies_to_timeval32(stime_jiffies, &r.ru_stime);
                r.ru_minflt = current->min_flt;
                r.ru_majflt = current->maj_flt;
                break;
        case RUSAGE_CHILDREN:
-               utime_jiffies = cputime_to_jiffies(current->signal->cutime);
-               stime_jiffies = cputime_to_jiffies(current->signal->cstime);
+               utime_jiffies = nsecs_to_jiffies(current->signal->cutime);
+               stime_jiffies = nsecs_to_jiffies(current->signal->cstime);
                jiffies_to_timeval32(utime_jiffies, &r.ru_utime);
                jiffies_to_timeval32(stime_jiffies, &r.ru_stime);
                r.ru_minflt = current->signal->cmin_flt;
index c332604606dd4b43b210ff08bd7d1f76eed546a7..63a04013d05a595ffa5e52d0237690aa85a41bb5 100644 (file)
@@ -2,7 +2,6 @@ generic-y += auxvec.h
 generic-y += bitsperlong.h
 generic-y += bugs.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += device.h
 generic-y += div64.h
 generic-y += emergency-restart.h
index a36e8601114d2ca2970f257f9f8d7e5c03ec08a2..d5da2115d78a678e343da2abec51f6c8efbbe0a4 100644 (file)
@@ -26,7 +26,9 @@ static inline void __delay(unsigned long loops)
        "       lp  1f                  \n"
        "       nop                     \n"
        "1:                             \n"
-       : : "r"(loops));
+       :
+        : "r"(loops)
+        : "lp_count");
 }
 
 extern void __bad_udelay(void);
index 689dd867fdff53eeafa0d01d980ecf425f66a759..8b90d25a15cca8ebd334402848d98aa22f07b8bf 100644 (file)
@@ -71,14 +71,14 @@ ENTRY(stext)
        GET_CPU_ID  r5
        cmp     r5, 0
        mov.nz  r0, r5
-#ifdef CONFIG_ARC_SMP_HALT_ON_RESET
-       ; Non-Master can proceed as system would be booted sufficiently
-       jnz     first_lines_of_secondary
-#else
+       bz      .Lmaster_proceed
+
        ; Non-Masters wait for Master to boot enough and bring them up
-       jnz     arc_platform_smp_wait_to_boot
-#endif
-       ; Master falls thru
+       ; when they resume, tail-call to entry point
+       mov     blink, @first_lines_of_secondary
+       j       arc_platform_smp_wait_to_boot
+
+.Lmaster_proceed:
 #endif
 
        ; Clear BSS before updating any globals
index 9274f8ade8c7ce58cdb6b943c9efed3bd995a3bb..9f6b68fd4f3bcfc6df3741f5206bf38ebb304ce7 100644 (file)
@@ -93,11 +93,10 @@ static void mcip_probe_n_setup(void)
        READ_BCR(ARC_REG_MCIP_BCR, mp);
 
        sprintf(smp_cpuinfo_buf,
-               "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s%s\n",
+               "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
                mp.ver, mp.num_cores,
                IS_AVAIL1(mp.ipi, "IPI "),
                IS_AVAIL1(mp.idu, "IDU "),
-               IS_AVAIL1(mp.llm, "LLM "),
                IS_AVAIL1(mp.dbg, "DEBUG "),
                IS_AVAIL1(mp.gfrc, "GFRC"));
 
@@ -175,7 +174,6 @@ static void idu_irq_unmask(struct irq_data *data)
        raw_spin_unlock_irqrestore(&mcip_lock, flags);
 }
 
-#ifdef CONFIG_SMP
 static int
 idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
                     bool force)
@@ -205,12 +203,27 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
 
        return IRQ_SET_MASK_OK;
 }
-#endif
+
+static void idu_irq_enable(struct irq_data *data)
+{
+       /*
+        * By default send all common interrupts to all available online CPUs.
+        * The affinity of common interrupts in IDU must be set manually since
+        * in some cases the kernel will not call irq_set_affinity() by itself:
+        *   1. When the kernel is not configured with support of SMP.
+        *   2. When the kernel is configured with support of SMP but upper
+        *      interrupt controllers does not support setting of the affinity
+        *      and cannot propagate it to IDU.
+        */
+       idu_irq_set_affinity(data, cpu_online_mask, false);
+       idu_irq_unmask(data);
+}
 
 static struct irq_chip idu_irq_chip = {
        .name                   = "MCIP IDU Intc",
        .irq_mask               = idu_irq_mask,
        .irq_unmask             = idu_irq_unmask,
+       .irq_enable             = idu_irq_enable,
 #ifdef CONFIG_SMP
        .irq_set_affinity       = idu_irq_set_affinity,
 #endif
@@ -243,36 +256,14 @@ static int idu_irq_xlate(struct irq_domain *d, struct device_node *n,
                         const u32 *intspec, unsigned int intsize,
                         irq_hw_number_t *out_hwirq, unsigned int *out_type)
 {
-       irq_hw_number_t hwirq = *out_hwirq = intspec[0];
-       int distri = intspec[1];
-       unsigned long flags;
-
+       /*
+        * Ignore value of interrupt distribution mode for common interrupts in
+        * IDU which resides in intspec[1] since setting an affinity using value
+        * from Device Tree is deprecated in ARC.
+        */
+       *out_hwirq = intspec[0];
        *out_type = IRQ_TYPE_NONE;
 
-       /* XXX: validate distribution scheme again online cpu mask */
-       if (distri == 0) {
-               /* 0 - Round Robin to all cpus, otherwise 1 bit per core */
-               raw_spin_lock_irqsave(&mcip_lock, flags);
-               idu_set_dest(hwirq, BIT(num_online_cpus()) - 1);
-               idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
-               raw_spin_unlock_irqrestore(&mcip_lock, flags);
-       } else {
-               /*
-                * DEST based distribution for Level Triggered intr can only
-                * have 1 CPU, so generalize it to always contain 1 cpu
-                */
-               int cpu = ffs(distri);
-
-               if (cpu != fls(distri))
-                       pr_warn("IDU irq %lx distri mode set to cpu %x\n",
-                               hwirq, cpu);
-
-               raw_spin_lock_irqsave(&mcip_lock, flags);
-               idu_set_dest(hwirq, cpu);
-               idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_DEST);
-               raw_spin_unlock_irqrestore(&mcip_lock, flags);
-       }
-
        return 0;
 }
 
index 88674d972c9d056f33f87205aa77049c11006129..2afbafadb6ab529ebaa7af4e367d733fc7837e1e 100644 (file)
@@ -90,22 +90,37 @@ void __init smp_cpus_done(unsigned int max_cpus)
  */
 static volatile int wake_flag;
 
+#ifdef CONFIG_ISA_ARCOMPACT
+
+#define __boot_read(f)         f
+#define __boot_write(f, v)     f = v
+
+#else
+
+#define __boot_read(f)         arc_read_uncached_32(&f)
+#define __boot_write(f, v)     arc_write_uncached_32(&f, v)
+
+#endif
+
 static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
 {
        BUG_ON(cpu == 0);
-       wake_flag = cpu;
+
+       __boot_write(wake_flag, cpu);
 }
 
 void arc_platform_smp_wait_to_boot(int cpu)
 {
-       while (wake_flag != cpu)
+       /* for halt-on-reset, we've waited already */
+       if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
+               return;
+
+       while (__boot_read(wake_flag) != cpu)
                ;
 
-       wake_flag = 0;
-       __asm__ __volatile__("j @first_lines_of_secondary       \n");
+       __boot_write(wake_flag, 0);
 }
 
-
 const char *arc_platform_smp_cpuinfo(void)
 {
        return plat_smp_ops.info ? : "";
index abd961f3e7639f13fb7954da68885ed0215f432c..5f69c3bd59bba47babf92c97a9c7e5a0174c5648 100644 (file)
@@ -241,8 +241,9 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
        if (state.fault)
                goto fault;
 
+       /* clear any remanants of delay slot */
        if (delay_mode(regs)) {
-               regs->ret = regs->bta;
+               regs->ret = regs->bta & ~1U;
                regs->status32 &= ~STATUS_DE_MASK;
        } else {
                regs->ret += state.instr_len;
index f10fe8526239552a676df4a4bfb1ae6a21b1aa41..01d178a2009f598f133e890a46c3e3e99efcc63c 100644 (file)
@@ -617,7 +617,7 @@ dtb-$(CONFIG_ARCH_ORION5X) += \
        orion5x-lacie-ethernet-disk-mini-v2.dtb \
        orion5x-linkstation-lsgl.dtb \
        orion5x-linkstation-lswtgl.dtb \
-       orion5x-lschl.dtb \
+       orion5x-linkstation-lschl.dtb \
        orion5x-lswsgl.dtb \
        orion5x-maxtor-shared-storage-2.dtb \
        orion5x-netgear-wnr854t.dtb \
index b792eee3899b25e7a7a206d16f5737f5fd631a9d..2ee40bc9ec21aafb9833bef482e344c2213273a9 100644 (file)
 / {
        #address-cells = <1>;
        #size-cells = <1>;
+       /*
+        * The decompressor and also some bootloaders rely on a
+        * pre-existing /chosen node to be available to insert the
+        * command line and merge other ATAGS info.
+        * Also for U-Boot there must be a pre-existing /memory node.
+        */
+       chosen {};
+       memory { device_type = "memory"; reg = <0 0>; };
 
        aliases {
                gpio0 = &gpio1;
index ac2a9da62b6ce81e4130c44185300ac95c31b2ca..43ccbbf754a340ab552a10ae0578badc59287aa4 100644 (file)
        #size-cells = <1>;
 
        interrupt-parent = <&icoll>;
+       /*
+        * The decompressor and also some bootloaders rely on a
+        * pre-existing /chosen node to be available to insert the
+        * command line and merge other ATAGS info.
+        * Also for U-Boot there must be a pre-existing /memory node.
+        */
+       chosen {};
+       memory { device_type = "memory"; reg = <0 0>; };
 
        aliases {
                gpio0 = &gpio0;
index 831d09a28155c5caa0b26ee6be42c20986067536..acd475659156bd05a0cfa9c94eeb778578a480d6 100644 (file)
 / {
        #address-cells = <1>;
        #size-cells = <1>;
+       /*
+        * The decompressor and also some bootloaders rely on a
+        * pre-existing /chosen node to be available to insert the
+        * command line and merge other ATAGS info.
+        * Also for U-Boot there must be a pre-existing /memory node.
+        */
+       chosen {};
+       memory { device_type = "memory"; reg = <0 0>; };
 
        aliases {
                ethernet0 = &fec;
index 9d8b5969ee3b0d9455afee96c666e77cae1cc713..b397384248f4b90876f8983ed95881e6caa1cb7d 100644 (file)
 / {
        #address-cells = <1>;
        #size-cells = <1>;
+       /*
+        * The decompressor and also some bootloaders rely on a
+        * pre-existing /chosen node to be available to insert the
+        * command line and merge other ATAGS info.
+        * Also for U-Boot there must be a pre-existing /memory node.
+        */
+       chosen {};
+       memory { device_type = "memory"; reg = <0 0>; };
 
        aliases {
                ethernet0 = &fec;
index 3aabf65a6a5224f5681d6ba189087a550824e964..d6a2190b60ef4d52a19d87e140d878c826a1b7ad 100644 (file)
        #size-cells = <1>;
 
        interrupt-parent = <&icoll>;
+       /*
+        * The decompressor and also some bootloaders rely on a
+        * pre-existing /chosen node to be available to insert the
+        * command line and merge other ATAGS info.
+        * Also for U-Boot there must be a pre-existing /memory node.
+        */
+       chosen {};
+       memory { device_type = "memory"; reg = <0 0>; };
 
        aliases {
                ethernet0 = &mac0;
index 85cd8be22f7155edae2d56ac5a99984427aa6131..23b0d2cf9acdcde0180b50b0b7635a52ddd92ecc 100644 (file)
 / {
        #address-cells = <1>;
        #size-cells = <1>;
+       /*
+        * The decompressor and also some bootloaders rely on a
+        * pre-existing /chosen node to be available to insert the
+        * command line and merge other ATAGS info.
+        * Also for U-Boot there must be a pre-existing /memory node.
+        */
+       chosen {};
+       memory { device_type = "memory"; reg = <0 0>; };
 
        aliases {
                serial0 = &uart1;
index 9f40e6229189f3c6cb72c99bc11c351598e7f0e3..d0496c65cea2695772f0477b4d153c3bd3e6fa4d 100644 (file)
 / {
        #address-cells = <1>;
        #size-cells = <1>;
+       /*
+        * The decompressor and also some bootloaders rely on a
+        * pre-existing /chosen node to be available to insert the
+        * command line and merge other ATAGS info.
+        * Also for U-Boot there must be a pre-existing /memory node.
+        */
+       chosen {};
+       memory { device_type = "memory"; reg = <0 0>; };
 
        aliases {
                ethernet0 = &fec;
index fe0221e4cbf7b108f81c2695507a9450653b53f8..ceae909e2201f729e8ed64a2f9baaa3103d3d968 100644 (file)
 / {
        #address-cells = <1>;
        #size-cells = <1>;
+       /*
+        * The decompressor and also some bootloaders rely on a
+        * pre-existing /chosen node to be available to insert the
+        * command line and merge other ATAGS info.
+        * Also for U-Boot there must be a pre-existing /memory node.
+        */
+       chosen {};
+       memory { device_type = "memory"; reg = <0 0>; };
 
        aliases {
                ethernet0 = &fec;
index 33526cade73582766f68972301d349c72fccb9a6..1ee1d542d9ad088c0bb94a55d66829d6ac1d92b9 100644 (file)
 / {
        #address-cells = <1>;
        #size-cells = <1>;
+       /*
+        * The decompressor and also some bootloaders rely on a
+        * pre-existing /chosen node to be available to insert the
+        * command line and merge other ATAGS info.
+        * Also for U-Boot there must be a pre-existing /memory node.
+        */
+       chosen {};
+       memory { device_type = "memory"; reg = <0 0>; };
 
        aliases {
                ethernet0 = &fec;
index ca51dc03e327b3f89b3836ea8ba41d55359af58e..2e516f4985e4cd2e470f8ddb0195a34821a1d09d 100644 (file)
 / {
        #address-cells = <1>;
        #size-cells = <1>;
+       /*
+        * The decompressor and also some bootloaders rely on a
+        * pre-existing /chosen node to be available to insert the
+        * command line and merge other ATAGS info.
+        * Also for U-Boot there must be a pre-existing /memory node.
+        */
+       chosen {};
+       memory { device_type = "memory"; reg = <0 0>; };
 
        aliases {
                ethernet0 = &fec;
index 1ade1951e620da007041882b467ebfbe3e530a7c..7aa120fbdc71ea0e4a2776312bc8aedfe5617f16 100644 (file)
 &gpio4 {
        gpio-ranges = <&iomuxc  5 136 1>, <&iomuxc  6 145 1>, <&iomuxc  7 150 1>,
                      <&iomuxc  8 146 1>, <&iomuxc  9 151 1>, <&iomuxc 10 147 1>,
-                     <&iomuxc 11 151 1>, <&iomuxc 12 148 1>, <&iomuxc 13 153 1>,
+                     <&iomuxc 11 152 1>, <&iomuxc 12 148 1>, <&iomuxc 13 153 1>,
                      <&iomuxc 14 149 1>, <&iomuxc 15 154 1>, <&iomuxc 16  39 7>,
                      <&iomuxc 23  56 1>, <&iomuxc 24  61 7>, <&iomuxc 31  46 1>;
 };
index 89b834f3fa17f6b576e31fd61e8bfad205f73923..e7d30f45b161ebb09f9d904f690638ec46c20ce7 100644 (file)
 / {
        #address-cells = <1>;
        #size-cells = <1>;
+       /*
+        * The decompressor and also some bootloaders rely on a
+        * pre-existing /chosen node to be available to insert the
+        * command line and merge other ATAGS info.
+        * Also for U-Boot there must be a pre-existing /memory node.
+        */
+       chosen {};
+       memory { device_type = "memory"; reg = <0 0>; };
 
        aliases {
                ethernet0 = &fec;
index 19cbd879c448984717a83e1d819efdec822c4957..cc9572ea2860a5b619ce27bce26ed78de0c4c13c 100644 (file)
 / {
        #address-cells = <1>;
        #size-cells = <1>;
+       /*
+        * The decompressor and also some bootloaders rely on a
+        * pre-existing /chosen node to be available to insert the
+        * command line and merge other ATAGS info.
+        * Also for U-Boot there must be a pre-existing /memory node.
+        */
+       chosen {};
+       memory { device_type = "memory"; reg = <0 0>; };
 
        aliases {
                ethernet0 = &fec;
index 10f33301619777a9d676eb49bae893540d920973..dd4ec85ecbaaff534c2128997da4215e5b0f93bc 100644 (file)
 / {
        #address-cells = <1>;
        #size-cells = <1>;
+       /*
+        * The decompressor and also some bootloaders rely on a
+        * pre-existing /chosen node to be available to insert the
+        * command line and merge other ATAGS info.
+        * Also for U-Boot there must be a pre-existing /memory node.
+        */
+       chosen {};
+       memory { device_type = "memory"; reg = <0 0>; };
 
        aliases {
                can0 = &flexcan1;
index 39845a7e046303e6448e506d7199a49b7530de74..53d3f8e41e9b8e99a15888cb63a50fc19aa591d4 100644 (file)
 / {
        #address-cells = <1>;
        #size-cells = <1>;
+       /*
+        * The decompressor and also some bootloaders rely on a
+        * pre-existing /chosen node to be available to insert the
+        * command line and merge other ATAGS info.
+        * Also for U-Boot there must be a pre-existing /memory node.
+        */
+       chosen {};
+       memory { device_type = "memory"; reg = <0 0>; };
 
        aliases {
                ethernet0 = &fec1;
index 8ff2cbdd8f0df26e59805a12dcb0ee871cc3ca73..be33dfc86838ea16dd09201c60cfcdfe00d3c054 100644 (file)
 / {
        #address-cells = <1>;
        #size-cells = <1>;
+       /*
+        * The decompressor and also some bootloaders rely on a
+        * pre-existing /chosen node to be available to insert the
+        * command line and merge other ATAGS info.
+        * Also for U-Boot there must be a pre-existing /memory node.
+        */
+       chosen {};
+       memory { device_type = "memory"; reg = <0 0>; };
 
        aliases {
                gpio0 = &gpio1;
similarity index 98%
rename from arch/arm/boot/dts/orion5x-lschl.dts
rename to arch/arm/boot/dts/orion5x-linkstation-lschl.dts
index 94740925284587f8f40219e920ced179e67cac3a..ea6c881634b989cad90258e6eb1094dcf36ce8b6 100644 (file)
@@ -2,7 +2,7 @@
  * Device Tree file for Buffalo Linkstation LS-CHLv3
  *
  * Copyright (C) 2016 Ash Hughes <ashley.hughes@blueyonder.co.uk>
- * Copyright (C) 2015, 2016
+ * Copyright (C) 2015-2017
  * Roger Shimizu <rogershimizu@gmail.com>
  *
  * This file is dual-licensed: you can use it either under the terms
@@ -52,7 +52,7 @@
 #include <dt-bindings/gpio/gpio.h>
 
 / {
-       model = "Buffalo Linkstation Live v3 (LS-CHL)";
+       model = "Buffalo Linkstation LiveV3 (LS-CHL)";
        compatible = "buffalo,lschl", "marvell,orion5x-88f5182", "marvell,orion5x";
 
        memory { /* 128 MB */
index c8b2944e304ac6dfdfdacbc3d1b63ab12e7ad73f..ace97e8576dbd9ae34d112b4aa8ec991647134e5 100644 (file)
                                phy-names       = "usb2-phy", "usb3-phy";
                                phys            = <&usb2_picophy0>,
                                                  <&phy_port2 PHY_TYPE_USB3>;
+                               snps,dis_u3_susphy_quirk;
                        };
                };
 
index 79c415c33f693e9d6fb20389bac592aad3c62b66..809f0bf3042ae4ed90f069e68defd3b590044461 100644 (file)
@@ -24,7 +24,7 @@ CONFIG_ARM_APPENDED_DTB=y
 CONFIG_ARM_ATAG_DTB_COMPAT=y
 CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init=/linuxrc mem=256M"
 CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_STAT=y
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=m
 CONFIG_CPU_FREQ_GOV_USERSPACE=m
index ea316c4b890efadb31df9f90df5ae382c94d4228..d3f1768840e28aa6a4e1ccba8cb3313bfbe78324 100644 (file)
@@ -64,8 +64,8 @@ CONFIG_NETFILTER=y
 CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_SCTP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
index 18e59feaa3071593936ca4f0ff3d3b021eea9b08..7f479cdb34797c36219fe37bf8cdac79e94c3b8b 100644 (file)
@@ -56,8 +56,8 @@ CONFIG_NETFILTER=y
 CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_SCTP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
index 361686a362f1360a3d33182132c437cc120475a7..69a4bd13eea594352630cfcd81d9fae5b5ca385c 100644 (file)
@@ -58,7 +58,7 @@ CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_ARM_APPENDED_DTB=y
 CONFIG_ARM_ATAG_DTB_COMPAT=y
 CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_STAT=y
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_IDLE=y
 CONFIG_ARM_KIRKWOOD_CPUIDLE=y
index 028d2b70e3b5b882feb185d3b037d1c53a3d0983..2e7a63f35bd33fd0e0932b286e7d7a423a25a3e8 100644 (file)
@@ -132,7 +132,7 @@ CONFIG_ARM_ATAG_DTB_COMPAT=y
 CONFIG_KEXEC=y
 CONFIG_EFI=y
 CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_STAT=y
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=m
 CONFIG_CPU_FREQ_GOV_USERSPACE=m
@@ -824,6 +824,7 @@ CONFIG_QCOM_SMSM=y
 CONFIG_QCOM_WCNSS_CTRL=m
 CONFIG_ROCKCHIP_PM_DOMAINS=y
 CONFIG_COMMON_CLK_QCOM=y
+CONFIG_QCOM_CLK_RPM=y
 CONFIG_CHROME_PLATFORMS=y
 CONFIG_STAGING_BOARD=y
 CONFIG_CROS_EC_CHARDEV=m
index f7f6039419aa916b659250a328d8f82c0cf22552..4b598da0d086eb1fe5da1cc2a62d0f81fd737834 100644 (file)
@@ -44,7 +44,7 @@ CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_ARM_APPENDED_DTB=y
 CONFIG_ARM_ATAG_DTB_COMPAT=y
 CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_STAT=y
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_IDLE=y
 CONFIG_ARM_KIRKWOOD_CPUIDLE=y
index e4314b1227a34cc2324e8aaf5e3cb43f28b78721..271dc7e78e439a2da4c2c1718521cae7a8904259 100644 (file)
@@ -97,7 +97,7 @@ CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_CMDLINE="root=/dev/ram0 ro"
 CONFIG_KEXEC=y
 CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_STAT=y
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=m
 CONFIG_CPU_FREQ_GOV_USERSPACE=m
index 1b0f8ae36fb30402b18d65b22e540978b147a2a8..adeaecd831a4d0ad8551918804f7760fd7e714f3 100644 (file)
@@ -38,7 +38,7 @@ CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_ARM_APPENDED_DTB=y
 CONFIG_KEXEC=y
 CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_STAT=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
index efb21757d41f6a28cfdd3f8ec6e63075866e94d3..b14e8c7d71bda52ed0a22ea516dea7200d2dce31 100644 (file)
@@ -2,7 +2,6 @@
 
 generic-y += bitsperlong.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += current.h
 generic-y += early_ioremap.h
 generic-y += emergency-restart.h
index 0b06f5341b451533c349de4b8f04e277b415cfd9..e4e6a9d6a825274cc9d4f9706f44f783c9514660 100644 (file)
@@ -55,6 +55,7 @@ void efi_virtmap_unload(void);
 
 #define efi_call_early(f, ...)         sys_table_arg->boottime->f(__VA_ARGS__)
 #define __efi_call_early(f, ...)       f(__VA_ARGS__)
+#define efi_call_runtime(f, ...)       sys_table_arg->runtime->f(__VA_ARGS__)
 #define efi_is_64bit()                 (false)
 
 #define efi_call_proto(protocol, f, instance, ...)                     \
index 1f59ea051bab814132074b09f55d3a57c800a471..b7e0125c0bbf2014a447800a383426b62d5147b8 100644 (file)
@@ -478,11 +478,10 @@ extern unsigned long __must_check
 arm_copy_from_user(void *to, const void __user *from, unsigned long n);
 
 static inline unsigned long __must_check
-__copy_from_user(void *to, const void __user *from, unsigned long n)
+__arch_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
        unsigned int __ua_flags;
 
-       check_object_size(to, n, false);
        __ua_flags = uaccess_save_and_enable();
        n = arm_copy_from_user(to, from, n);
        uaccess_restore(__ua_flags);
@@ -495,18 +494,15 @@ extern unsigned long __must_check
 __copy_to_user_std(void __user *to, const void *from, unsigned long n);
 
 static inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+__arch_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 #ifndef CONFIG_UACCESS_WITH_MEMCPY
        unsigned int __ua_flags;
-
-       check_object_size(from, n, true);
        __ua_flags = uaccess_save_and_enable();
        n = arm_copy_to_user(to, from, n);
        uaccess_restore(__ua_flags);
        return n;
 #else
-       check_object_size(from, n, true);
        return arm_copy_to_user(to, from, n);
 #endif
 }
@@ -526,25 +522,49 @@ __clear_user(void __user *addr, unsigned long n)
 }
 
 #else
-#define __copy_from_user(to, from, n)  (memcpy(to, (void __force *)from, n), 0)
-#define __copy_to_user(to, from, n)    (memcpy((void __force *)to, from, n), 0)
+#define __arch_copy_from_user(to, from, n)     \
+                                       (memcpy(to, (void __force *)from, n), 0)
+#define __arch_copy_to_user(to, from, n)       \
+                                       (memcpy((void __force *)to, from, n), 0)
 #define __clear_user(addr, n)          (memset((void __force *)addr, 0, n), 0)
 #endif
 
-static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
+static inline unsigned long __must_check
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+       check_object_size(to, n, false);
+       return __arch_copy_from_user(to, from, n);
+}
+
+static inline unsigned long __must_check
+copy_from_user(void *to, const void __user *from, unsigned long n)
 {
        unsigned long res = n;
+
+       check_object_size(to, n, false);
+
        if (likely(access_ok(VERIFY_READ, from, n)))
-               res = __copy_from_user(to, from, n);
+               res = __arch_copy_from_user(to, from, n);
        if (unlikely(res))
                memset(to + (n - res), 0, res);
        return res;
 }
 
-static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
+static inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+       check_object_size(from, n, true);
+
+       return __arch_copy_to_user(to, from, n);
+}
+
+static inline unsigned long __must_check
+copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+       check_object_size(from, n, true);
+
        if (access_ok(VERIFY_WRITE, to, n))
-               n = __copy_to_user(to, from, n);
+               n = __arch_copy_to_user(to, from, n);
        return n;
 }
 
index ce131ed5939d5ff86054afc3a14de7cb51225a79..ae738a6319f6a341c05a3c6a2dee2da30aa02a40 100644 (file)
@@ -600,7 +600,7 @@ static int gpr_set(struct task_struct *target,
                   const void *kbuf, const void __user *ubuf)
 {
        int ret;
-       struct pt_regs newregs;
+       struct pt_regs newregs = *task_pt_regs(target);
 
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 &newregs,
index 8ecfd15c3a0248db29667fe3dc6ec6429fc9fc7c..df73914e81c8344feccac5df8d5791dcbe92ed60 100644 (file)
@@ -67,7 +67,7 @@ ENTRY(__get_user_4)
 ENDPROC(__get_user_4)
 
 ENTRY(__get_user_8)
-       check_uaccess r0, 8, r1, r2, __get_user_bad
+       check_uaccess r0, 8, r1, r2, __get_user_bad8
 #ifdef CONFIG_THUMB2_KERNEL
 5: TUSER(ldr)  r2, [r0]
 6: TUSER(ldr)  r3, [r0, #4]
index ad92d9f7e4df3e09b305bb40520d58c081b27eb4..0ac176386789428be26631ad1c9f75ad343e8951 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/platform_device.h>
-#include <linux/gpio.h>
 #include <linux/i2c.h>
 #include <linux/i2c-gpio.h>
 #include <linux/spi/spi.h>
@@ -106,33 +105,10 @@ static struct cs4271_platform_data edb93xx_cs4271_data = {
        .gpio_nreset    = -EINVAL,      /* filled in later */
 };
 
-static int edb93xx_cs4271_hw_setup(struct spi_device *spi)
-{
-       return gpio_request_one(EP93XX_GPIO_LINE_EGPIO6,
-                               GPIOF_OUT_INIT_HIGH, spi->modalias);
-}
-
-static void edb93xx_cs4271_hw_cleanup(struct spi_device *spi)
-{
-       gpio_free(EP93XX_GPIO_LINE_EGPIO6);
-}
-
-static void edb93xx_cs4271_hw_cs_control(struct spi_device *spi, int value)
-{
-       gpio_set_value(EP93XX_GPIO_LINE_EGPIO6, value);
-}
-
-static struct ep93xx_spi_chip_ops edb93xx_cs4271_hw = {
-       .setup          = edb93xx_cs4271_hw_setup,
-       .cleanup        = edb93xx_cs4271_hw_cleanup,
-       .cs_control     = edb93xx_cs4271_hw_cs_control,
-};
-
 static struct spi_board_info edb93xx_spi_board_info[] __initdata = {
        {
                .modalias               = "cs4271",
                .platform_data          = &edb93xx_cs4271_data,
-               .controller_data        = &edb93xx_cs4271_hw,
                .max_speed_hz           = 6000000,
                .bus_num                = 0,
                .chip_select            = 0,
@@ -140,8 +116,13 @@ static struct spi_board_info edb93xx_spi_board_info[] __initdata = {
        },
 };
 
+static int edb93xx_spi_chipselects[] __initdata = {
+       EP93XX_GPIO_LINE_EGPIO6,
+};
+
 static struct ep93xx_spi_info edb93xx_spi_info __initdata = {
-       .num_chipselect = ARRAY_SIZE(edb93xx_spi_board_info),
+       .chipselect     = edb93xx_spi_chipselects,
+       .num_chipselect = ARRAY_SIZE(edb93xx_spi_chipselects),
 };
 
 static void __init edb93xx_register_spi(void)
index 7bb540c421ee30314b93eb8c0f5247070b349b19..c7a40f245892885b403621e40bb33b4405111d5d 100644 (file)
@@ -48,56 +48,6 @@ static struct ep93xxfb_mach_info __initdata simone_fb_info = {
  */
 #define MMC_CARD_DETECT_GPIO EP93XX_GPIO_LINE_EGPIO0
 
-/*
- * Up to v1.3, the Sim.One used SFRMOUT as SD card chip select, but this goes
- * low between multi-message command blocks. From v1.4, it uses a GPIO instead.
- * v1.3 parts will still work, since the signal on SFRMOUT is automatic.
- */
-#define MMC_CHIP_SELECT_GPIO EP93XX_GPIO_LINE_EGPIO1
-
-/*
- * MMC SPI chip select GPIO handling. If you are using SFRMOUT (SFRM1) signal,
- * you can leave these empty and pass NULL as .controller_data.
- */
-
-static int simone_mmc_spi_setup(struct spi_device *spi)
-{
-       unsigned int gpio = MMC_CHIP_SELECT_GPIO;
-       int err;
-
-       err = gpio_request(gpio, spi->modalias);
-       if (err)
-               return err;
-
-       err = gpio_direction_output(gpio, 1);
-       if (err) {
-               gpio_free(gpio);
-               return err;
-       }
-
-       return 0;
-}
-
-static void simone_mmc_spi_cleanup(struct spi_device *spi)
-{
-       unsigned int gpio = MMC_CHIP_SELECT_GPIO;
-
-       gpio_set_value(gpio, 1);
-       gpio_direction_input(gpio);
-       gpio_free(gpio);
-}
-
-static void simone_mmc_spi_cs_control(struct spi_device *spi, int value)
-{
-       gpio_set_value(MMC_CHIP_SELECT_GPIO, value);
-}
-
-static struct ep93xx_spi_chip_ops simone_mmc_spi_ops = {
-       .setup          = simone_mmc_spi_setup,
-       .cleanup        = simone_mmc_spi_cleanup,
-       .cs_control     = simone_mmc_spi_cs_control,
-};
-
 /*
  * MMC card detection GPIO setup.
  */
@@ -152,7 +102,6 @@ static struct mmc_spi_platform_data simone_mmc_spi_data = {
 static struct spi_board_info simone_spi_devices[] __initdata = {
        {
                .modalias               = "mmc_spi",
-               .controller_data        = &simone_mmc_spi_ops,
                .platform_data          = &simone_mmc_spi_data,
                /*
                 * We use 10 MHz even though the maximum is 3.7 MHz. The driver
@@ -165,8 +114,18 @@ static struct spi_board_info simone_spi_devices[] __initdata = {
        },
 };
 
+/*
+ * Up to v1.3, the Sim.One used SFRMOUT as SD card chip select, but this goes
+ * low between multi-message command blocks. From v1.4, it uses a GPIO instead.
+ * v1.3 parts will still work, since the signal on SFRMOUT is automatic.
+ */
+static int simone_spi_chipselects[] __initdata = {
+       EP93XX_GPIO_LINE_EGPIO1,
+};
+
 static struct ep93xx_spi_info simone_spi_info __initdata = {
-       .num_chipselect = ARRAY_SIZE(simone_spi_devices),
+       .chipselect     = simone_spi_chipselects,
+       .num_chipselect = ARRAY_SIZE(simone_spi_chipselects),
        .use_dma = 1,
 };
 
index 5cced5988498f7c7fbe065b42a065e37b8f6b703..1daf9441058c8ded557463daad4f7dcef44381bd 100644 (file)
@@ -175,33 +175,9 @@ static struct cs4271_platform_data vision_cs4271_data = {
        .gpio_nreset    = EP93XX_GPIO_LINE_H(2),
 };
 
-static int vision_cs4271_hw_setup(struct spi_device *spi)
-{
-       return gpio_request_one(EP93XX_GPIO_LINE_EGPIO6,
-                               GPIOF_OUT_INIT_HIGH, spi->modalias);
-}
-
-static void vision_cs4271_hw_cleanup(struct spi_device *spi)
-{
-       gpio_free(EP93XX_GPIO_LINE_EGPIO6);
-}
-
-static void vision_cs4271_hw_cs_control(struct spi_device *spi, int value)
-{
-       gpio_set_value(EP93XX_GPIO_LINE_EGPIO6, value);
-}
-
-static struct ep93xx_spi_chip_ops vision_cs4271_hw = {
-       .setup          = vision_cs4271_hw_setup,
-       .cleanup        = vision_cs4271_hw_cleanup,
-       .cs_control     = vision_cs4271_hw_cs_control,
-};
-
 /*************************************************************************
  * SPI Flash
  *************************************************************************/
-#define VISION_SPI_FLASH_CS    EP93XX_GPIO_LINE_EGPIO7
-
 static struct mtd_partition vision_spi_flash_partitions[] = {
        {
                .name   = "SPI bootstrap",
@@ -224,68 +200,20 @@ static struct flash_platform_data vision_spi_flash_data = {
        .nr_parts       = ARRAY_SIZE(vision_spi_flash_partitions),
 };
 
-static int vision_spi_flash_hw_setup(struct spi_device *spi)
-{
-       return gpio_request_one(VISION_SPI_FLASH_CS, GPIOF_INIT_HIGH,
-                               spi->modalias);
-}
-
-static void vision_spi_flash_hw_cleanup(struct spi_device *spi)
-{
-       gpio_free(VISION_SPI_FLASH_CS);
-}
-
-static void vision_spi_flash_hw_cs_control(struct spi_device *spi, int value)
-{
-       gpio_set_value(VISION_SPI_FLASH_CS, value);
-}
-
-static struct ep93xx_spi_chip_ops vision_spi_flash_hw = {
-       .setup          = vision_spi_flash_hw_setup,
-       .cleanup        = vision_spi_flash_hw_cleanup,
-       .cs_control     = vision_spi_flash_hw_cs_control,
-};
-
 /*************************************************************************
  * SPI SD/MMC host
  *************************************************************************/
-#define VISION_SPI_MMC_CS      EP93XX_GPIO_LINE_G(2)
-#define VISION_SPI_MMC_WP      EP93XX_GPIO_LINE_F(0)
-#define VISION_SPI_MMC_CD      EP93XX_GPIO_LINE_EGPIO15
-
 static struct mmc_spi_platform_data vision_spi_mmc_data = {
        .detect_delay   = 100,
        .powerup_msecs  = 100,
        .ocr_mask       = MMC_VDD_32_33 | MMC_VDD_33_34,
        .flags          = MMC_SPI_USE_CD_GPIO | MMC_SPI_USE_RO_GPIO,
-       .cd_gpio        = VISION_SPI_MMC_CD,
+       .cd_gpio        = EP93XX_GPIO_LINE_EGPIO15,
        .cd_debounce    = 1,
-       .ro_gpio        = VISION_SPI_MMC_WP,
+       .ro_gpio        = EP93XX_GPIO_LINE_F(0),
        .caps2          = MMC_CAP2_RO_ACTIVE_HIGH,
 };
 
-static int vision_spi_mmc_hw_setup(struct spi_device *spi)
-{
-       return gpio_request_one(VISION_SPI_MMC_CS, GPIOF_INIT_HIGH,
-                               spi->modalias);
-}
-
-static void vision_spi_mmc_hw_cleanup(struct spi_device *spi)
-{
-       gpio_free(VISION_SPI_MMC_CS);
-}
-
-static void vision_spi_mmc_hw_cs_control(struct spi_device *spi, int value)
-{
-       gpio_set_value(VISION_SPI_MMC_CS, value);
-}
-
-static struct ep93xx_spi_chip_ops vision_spi_mmc_hw = {
-       .setup          = vision_spi_mmc_hw_setup,
-       .cleanup        = vision_spi_mmc_hw_cleanup,
-       .cs_control     = vision_spi_mmc_hw_cs_control,
-};
-
 /*************************************************************************
  * SPI Bus
  *************************************************************************/
@@ -293,7 +221,6 @@ static struct spi_board_info vision_spi_board_info[] __initdata = {
        {
                .modalias               = "cs4271",
                .platform_data          = &vision_cs4271_data,
-               .controller_data        = &vision_cs4271_hw,
                .max_speed_hz           = 6000000,
                .bus_num                = 0,
                .chip_select            = 0,
@@ -301,7 +228,6 @@ static struct spi_board_info vision_spi_board_info[] __initdata = {
        }, {
                .modalias               = "sst25l",
                .platform_data          = &vision_spi_flash_data,
-               .controller_data        = &vision_spi_flash_hw,
                .max_speed_hz           = 20000000,
                .bus_num                = 0,
                .chip_select            = 1,
@@ -309,7 +235,6 @@ static struct spi_board_info vision_spi_board_info[] __initdata = {
        }, {
                .modalias               = "mmc_spi",
                .platform_data          = &vision_spi_mmc_data,
-               .controller_data        = &vision_spi_mmc_hw,
                .max_speed_hz           = 20000000,
                .bus_num                = 0,
                .chip_select            = 2,
@@ -317,8 +242,15 @@ static struct spi_board_info vision_spi_board_info[] __initdata = {
        },
 };
 
+static int vision_spi_chipselects[] __initdata = {
+       EP93XX_GPIO_LINE_EGPIO6,
+       EP93XX_GPIO_LINE_EGPIO7,
+       EP93XX_GPIO_LINE_G(2),
+};
+
 static struct ep93xx_spi_info vision_spi_master __initdata = {
-       .num_chipselect = ARRAY_SIZE(vision_spi_board_info),
+       .chipselect     = vision_spi_chipselects,
+       .num_chipselect = ARRAY_SIZE(vision_spi_chipselects),
        .use_dma        = 1,
 };
 
index 699157759120f13ed4047b05609e53cb419fffe5..c03bf28d8bbc9a1a3f2be4187465c3cb674e25f1 100644 (file)
@@ -60,7 +60,6 @@
 
 #define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu)
 
-static enum cpuhp_state cpuhp_mmdc_state;
 static int ddr_type;
 
 struct fsl_mmdc_devtype_data {
@@ -82,6 +81,7 @@ static const struct of_device_id imx_mmdc_dt_ids[] = {
 
 #ifdef CONFIG_PERF_EVENTS
 
+static enum cpuhp_state cpuhp_mmdc_state;
 static DEFINE_IDA(mmdc_ida);
 
 PMU_EVENT_ATTR_STRING(total-cycles, mmdc_pmu_total_cycles, "event=0x00")
index 2bb4b09f079e2e6496677f73837e56f1d100a819..ad7d604ff0013268eab461bd2f37ddc8d019879a 100644 (file)
@@ -57,6 +57,7 @@ config ARCH_R7S72100
        select PM
        select PM_GENERIC_DOMAINS
        select SYS_SUPPORTS_SH_MTU2
+       select RENESAS_OSTM
 
 config ARCH_R8A73A4
        bool "R-Mobile APE6 (R8A73A40)"
index ab7710002ba60e99287beb41e689e3ae4d148d6e..82d3e79ec82b231587b6002fa0aa15e2d70818c1 100644 (file)
@@ -1171,6 +1171,25 @@ core_initcall(dma_debug_do_init);
 
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
 
+static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
+{
+       int prot = 0;
+
+       if (attrs & DMA_ATTR_PRIVILEGED)
+               prot |= IOMMU_PRIV;
+
+       switch (dir) {
+       case DMA_BIDIRECTIONAL:
+               return prot | IOMMU_READ | IOMMU_WRITE;
+       case DMA_TO_DEVICE:
+               return prot | IOMMU_READ;
+       case DMA_FROM_DEVICE:
+               return prot | IOMMU_WRITE;
+       default:
+               return prot;
+       }
+}
+
 /* IOMMU */
 
 static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
@@ -1394,7 +1413,8 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
  * Create a mapping in device IO address space for specified pages
  */
 static dma_addr_t
-__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
+__iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
+                      unsigned long attrs)
 {
        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
@@ -1419,7 +1439,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
 
                len = (j - i) << PAGE_SHIFT;
                ret = iommu_map(mapping->domain, iova, phys, len,
-                               IOMMU_READ|IOMMU_WRITE);
+                               __dma_info_to_prot(DMA_BIDIRECTIONAL, attrs));
                if (ret < 0)
                        goto fail;
                iova += len;
@@ -1476,7 +1496,8 @@ static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
 }
 
 static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
-                                 dma_addr_t *handle, int coherent_flag)
+                                 dma_addr_t *handle, int coherent_flag,
+                                 unsigned long attrs)
 {
        struct page *page;
        void *addr;
@@ -1488,7 +1509,7 @@ static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
        if (!addr)
                return NULL;
 
-       *handle = __iommu_create_mapping(dev, &page, size);
+       *handle = __iommu_create_mapping(dev, &page, size, attrs);
        if (*handle == DMA_ERROR_CODE)
                goto err_mapping;
 
@@ -1522,7 +1543,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
 
        if (coherent_flag  == COHERENT || !gfpflags_allow_blocking(gfp))
                return __iommu_alloc_simple(dev, size, gfp, handle,
-                                           coherent_flag);
+                                           coherent_flag, attrs);
 
        /*
         * Following is a work-around (a.k.a. hack) to prevent pages
@@ -1537,7 +1558,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
        if (!pages)
                return NULL;
 
-       *handle = __iommu_create_mapping(dev, pages, size);
+       *handle = __iommu_create_mapping(dev, pages, size, attrs);
        if (*handle == DMA_ERROR_CODE)
                goto err_buffer;
 
@@ -1672,27 +1693,6 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
                                         GFP_KERNEL);
 }
 
-static int __dma_direction_to_prot(enum dma_data_direction dir)
-{
-       int prot;
-
-       switch (dir) {
-       case DMA_BIDIRECTIONAL:
-               prot = IOMMU_READ | IOMMU_WRITE;
-               break;
-       case DMA_TO_DEVICE:
-               prot = IOMMU_READ;
-               break;
-       case DMA_FROM_DEVICE:
-               prot = IOMMU_WRITE;
-               break;
-       default:
-               prot = 0;
-       }
-
-       return prot;
-}
-
 /*
  * Map a part of the scatter-gather list into contiguous io address space
  */
@@ -1722,7 +1722,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
                if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                        __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 
-               prot = __dma_direction_to_prot(dir);
+               prot = __dma_info_to_prot(dir, attrs);
 
                ret = iommu_map(mapping->domain, iova, phys, len, prot);
                if (ret < 0)
@@ -1930,7 +1930,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
        if (dma_addr == DMA_ERROR_CODE)
                return dma_addr;
 
-       prot = __dma_direction_to_prot(dir);
+       prot = __dma_info_to_prot(dir, attrs);
 
        ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
        if (ret < 0)
@@ -2036,7 +2036,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev,
        if (dma_addr == DMA_ERROR_CODE)
                return dma_addr;
 
-       prot = __dma_direction_to_prot(dir) | IOMMU_MMIO;
+       prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
 
        ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
        if (ret < 0)
index 3a2e678b8d30cabfb058fd82bb1d3336e3dab02d..0122ad1a60270cda8c53faf69296b8a93a902851 100644 (file)
@@ -610,9 +610,9 @@ static int __init early_abort_handler(unsigned long addr, unsigned int fsr,
 
 void __init early_abt_enable(void)
 {
-       fsr_info[22].fn = early_abort_handler;
+       fsr_info[FSR_FS_AEA].fn = early_abort_handler;
        local_abt_enable();
-       fsr_info[22].fn = do_bad;
+       fsr_info[FSR_FS_AEA].fn = do_bad;
 }
 
 #ifndef CONFIG_ARM_LPAE
index 67532f24227105c02f8d5a5be13ee46879aac237..afc1f84e763b248b2193715e757d432cc055eac8 100644 (file)
 #define FSR_FS5_0              (0x3f)
 
 #ifdef CONFIG_ARM_LPAE
+#define FSR_FS_AEA             17
+
 static inline int fsr_fs(unsigned int fsr)
 {
        return fsr & FSR_FS5_0;
 }
 #else
+#define FSR_FS_AEA             22
+
 static inline int fsr_fs(unsigned int fsr)
 {
        return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
index 1117421268976b62dd4e77cd3a523c10070f3ace..f7dfd6d5865977fe23c156e47c3c5f49775362b6 100644 (file)
@@ -96,7 +96,7 @@ config ARM64
        select HAVE_RCU_TABLE_FREE
        select HAVE_SYSCALL_TRACEPOINTS
        select HAVE_KPROBES
-       select HAVE_KRETPROBES if HAVE_KPROBES
+       select HAVE_KRETPROBES
        select IOMMU_DMA if IOMMU_SUPPORT
        select IRQ_DOMAIN
        select IRQ_FORCED_THREADING
index eada0b58ba1c7637d46fffaf7eadaf37380d41e8..0cbe24b49710fd4057aec2da23ccbf3830ca4b92 100644 (file)
        #address-cells = <2>;
        #size-cells = <2>;
 
+       reserved-memory {
+               #address-cells = <2>;
+               #size-cells = <2>;
+               ranges;
+
+               /* 16 MiB reserved for Hardware ROM Firmware */
+               hwrom_reserved: hwrom@0 {
+                       reg = <0x0 0x0 0x0 0x1000000>;
+                       no-map;
+               };
+
+               /* 2 MiB reserved for ARM Trusted Firmware (BL31) */
+               secmon_reserved: secmon@10000000 {
+                       reg = <0x0 0x10000000 0x0 0x200000>;
+                       no-map;
+               };
+       };
+
        cpus {
                #address-cells = <0x2>;
                #size-cells = <0x0>;
index 5d28e1cdc9986a18de73efc9c3a988c4556f1d88..c59403adb387dbcd33d375561ee3eaa07ebdfa3f 100644 (file)
        status = "okay";
        pinctrl-0 = <&eth_rgmii_pins>;
        pinctrl-names = "default";
+       phy-handle = <&eth_phy0>;
+
+       mdio {
+               compatible = "snps,dwmac-mdio";
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               eth_phy0: ethernet-phy@0 {
+                       reg = <0>;
+                       eee-broken-1000t;
+               };
+       };
 };
 
 &ir {
index c53dbeae79f2f5fce8353b169e96ff6c79294aa5..838dad5c209fae0f3a660e79d1f5fef8eb1f0c68 100644 (file)
@@ -193,15 +193,16 @@ AES_ENTRY(aes_cbc_encrypt)
        cbz             w6, .Lcbcencloop
 
        ld1             {v0.16b}, [x5]                  /* get iv */
-       enc_prepare     w3, x2, x5
+       enc_prepare     w3, x2, x6
 
 .Lcbcencloop:
        ld1             {v1.16b}, [x1], #16             /* get next pt block */
        eor             v0.16b, v0.16b, v1.16b          /* ..and xor with iv */
-       encrypt_block   v0, w3, x2, x5, w6
+       encrypt_block   v0, w3, x2, x6, w7
        st1             {v0.16b}, [x0], #16
        subs            w4, w4, #1
        bne             .Lcbcencloop
+       st1             {v0.16b}, [x5]                  /* return iv */
        ret
 AES_ENDPROC(aes_cbc_encrypt)
 
@@ -211,7 +212,7 @@ AES_ENTRY(aes_cbc_decrypt)
        cbz             w6, .LcbcdecloopNx
 
        ld1             {v7.16b}, [x5]                  /* get iv */
-       dec_prepare     w3, x2, x5
+       dec_prepare     w3, x2, x6
 
 .LcbcdecloopNx:
 #if INTERLEAVE >= 2
@@ -248,7 +249,7 @@ AES_ENTRY(aes_cbc_decrypt)
 .Lcbcdecloop:
        ld1             {v1.16b}, [x1], #16             /* get next ct block */
        mov             v0.16b, v1.16b                  /* ...and copy to v0 */
-       decrypt_block   v0, w3, x2, x5, w6
+       decrypt_block   v0, w3, x2, x6, w7
        eor             v0.16b, v0.16b, v7.16b          /* xor with iv => pt */
        mov             v7.16b, v1.16b                  /* ct is next iv */
        st1             {v0.16b}, [x0], #16
@@ -256,6 +257,7 @@ AES_ENTRY(aes_cbc_decrypt)
        bne             .Lcbcdecloop
 .Lcbcdecout:
        FRAME_POP
+       st1             {v7.16b}, [x5]                  /* return iv */
        ret
 AES_ENDPROC(aes_cbc_decrypt)
 
@@ -267,24 +269,15 @@ AES_ENDPROC(aes_cbc_decrypt)
 
 AES_ENTRY(aes_ctr_encrypt)
        FRAME_PUSH
-       cbnz            w6, .Lctrfirst          /* 1st time around? */
-       umov            x5, v4.d[1]             /* keep swabbed ctr in reg */
-       rev             x5, x5
-#if INTERLEAVE >= 2
-       cmn             w5, w4                  /* 32 bit overflow? */
-       bcs             .Lctrinc
-       add             x5, x5, #1              /* increment BE ctr */
-       b               .LctrincNx
-#else
-       b               .Lctrinc
-#endif
-.Lctrfirst:
+       cbz             w6, .Lctrnotfirst       /* 1st time around? */
        enc_prepare     w3, x2, x6
        ld1             {v4.16b}, [x5]
-       umov            x5, v4.d[1]             /* keep swabbed ctr in reg */
-       rev             x5, x5
+
+.Lctrnotfirst:
+       umov            x8, v4.d[1]             /* keep swabbed ctr in reg */
+       rev             x8, x8
 #if INTERLEAVE >= 2
-       cmn             w5, w4                  /* 32 bit overflow? */
+       cmn             w8, w4                  /* 32 bit overflow? */
        bcs             .Lctrloop
 .LctrloopNx:
        subs            w4, w4, #INTERLEAVE
@@ -292,11 +285,11 @@ AES_ENTRY(aes_ctr_encrypt)
 #if INTERLEAVE == 2
        mov             v0.8b, v4.8b
        mov             v1.8b, v4.8b
-       rev             x7, x5
-       add             x5, x5, #1
+       rev             x7, x8
+       add             x8, x8, #1
        ins             v0.d[1], x7
-       rev             x7, x5
-       add             x5, x5, #1
+       rev             x7, x8
+       add             x8, x8, #1
        ins             v1.d[1], x7
        ld1             {v2.16b-v3.16b}, [x1], #32      /* get 2 input blocks */
        do_encrypt_block2x
@@ -305,7 +298,7 @@ AES_ENTRY(aes_ctr_encrypt)
        st1             {v0.16b-v1.16b}, [x0], #32
 #else
        ldr             q8, =0x30000000200000001        /* addends 1,2,3[,0] */
-       dup             v7.4s, w5
+       dup             v7.4s, w8
        mov             v0.16b, v4.16b
        add             v7.4s, v7.4s, v8.4s
        mov             v1.16b, v4.16b
@@ -323,18 +316,12 @@ AES_ENTRY(aes_ctr_encrypt)
        eor             v2.16b, v7.16b, v2.16b
        eor             v3.16b, v5.16b, v3.16b
        st1             {v0.16b-v3.16b}, [x0], #64
-       add             x5, x5, #INTERLEAVE
+       add             x8, x8, #INTERLEAVE
 #endif
-       cbz             w4, .LctroutNx
-.LctrincNx:
-       rev             x7, x5
+       rev             x7, x8
        ins             v4.d[1], x7
+       cbz             w4, .Lctrout
        b               .LctrloopNx
-.LctroutNx:
-       sub             x5, x5, #1
-       rev             x7, x5
-       ins             v4.d[1], x7
-       b               .Lctrout
 .Lctr1x:
        adds            w4, w4, #INTERLEAVE
        beq             .Lctrout
@@ -342,30 +329,39 @@ AES_ENTRY(aes_ctr_encrypt)
 .Lctrloop:
        mov             v0.16b, v4.16b
        encrypt_block   v0, w3, x2, x6, w7
+
+       adds            x8, x8, #1              /* increment BE ctr */
+       rev             x7, x8
+       ins             v4.d[1], x7
+       bcs             .Lctrcarry              /* overflow? */
+
+.Lctrcarrydone:
        subs            w4, w4, #1
        bmi             .Lctrhalfblock          /* blocks < 0 means 1/2 block */
        ld1             {v3.16b}, [x1], #16
        eor             v3.16b, v0.16b, v3.16b
        st1             {v3.16b}, [x0], #16
-       beq             .Lctrout
-.Lctrinc:
-       adds            x5, x5, #1              /* increment BE ctr */
-       rev             x7, x5
-       ins             v4.d[1], x7
-       bcc             .Lctrloop               /* no overflow? */
-       umov            x7, v4.d[0]             /* load upper word of ctr  */
-       rev             x7, x7                  /* ... to handle the carry */
-       add             x7, x7, #1
-       rev             x7, x7
-       ins             v4.d[0], x7
-       b               .Lctrloop
+       bne             .Lctrloop
+
+.Lctrout:
+       st1             {v4.16b}, [x5]          /* return next CTR value */
+       FRAME_POP
+       ret
+
 .Lctrhalfblock:
        ld1             {v3.8b}, [x1]
        eor             v3.8b, v0.8b, v3.8b
        st1             {v3.8b}, [x0]
-.Lctrout:
        FRAME_POP
        ret
+
+.Lctrcarry:
+       umov            x7, v4.d[0]             /* load upper word of ctr  */
+       rev             x7, x7                  /* ... to handle the carry */
+       add             x7, x7, #1
+       rev             x7, x7
+       ins             v4.d[0], x7
+       b               .Lctrcarrydone
 AES_ENDPROC(aes_ctr_encrypt)
        .ltorg
 
index 8365a84c2640e1363b61918e1987c6f8f07f2ecd..a12f1afc95a391b182b14e33af2f71b905ae6e5c 100644 (file)
@@ -1,6 +1,5 @@
 generic-y += bugs.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += delay.h
 generic-y += div64.h
 generic-y += dma.h
index eaa5bbe3fa8750f78b206b188e46ecee09f70de1..b4b34004a21e2a8ee9514d55f1cd0b90db70bc03 100644 (file)
 
 #include <clocksource/arm_arch_timer.h>
 
-#if IS_ENABLED(CONFIG_FSL_ERRATUM_A008585)
+#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND)
 extern struct static_key_false arch_timer_read_ool_enabled;
-#define needs_fsl_a008585_workaround() \
+#define needs_unstable_timer_counter_workaround() \
        static_branch_unlikely(&arch_timer_read_ool_enabled)
 #else
-#define needs_fsl_a008585_workaround()  false
+#define needs_unstable_timer_counter_workaround()  false
 #endif
 
-u32 __fsl_a008585_read_cntp_tval_el0(void);
-u32 __fsl_a008585_read_cntv_tval_el0(void);
-u64 __fsl_a008585_read_cntvct_el0(void);
 
-/*
- * The number of retries is an arbitrary value well beyond the highest number
- * of iterations the loop has been observed to take.
- */
-#define __fsl_a008585_read_reg(reg) ({                 \
-       u64 _old, _new;                                 \
-       int _retries = 200;                             \
-                                                       \
-       do {                                            \
-               _old = read_sysreg(reg);                \
-               _new = read_sysreg(reg);                \
-               _retries--;                             \
-       } while (unlikely(_old != _new) && _retries);   \
-                                                       \
-       WARN_ON_ONCE(!_retries);                        \
-       _new;                                           \
-})
+struct arch_timer_erratum_workaround {
+       const char *id;         /* Indicate the Erratum ID */
+       u32 (*read_cntp_tval_el0)(void);
+       u32 (*read_cntv_tval_el0)(void);
+       u64 (*read_cntvct_el0)(void);
+};
+
+extern const struct arch_timer_erratum_workaround *timer_unstable_counter_workaround;
 
 #define arch_timer_reg_read_stable(reg)                \
 ({                                                     \
        u64 _val;                                       \
-       if (needs_fsl_a008585_workaround())             \
-               _val = __fsl_a008585_read_##reg();      \
+       if (needs_unstable_timer_counter_workaround())          \
+               _val = timer_unstable_counter_workaround->read_##reg();\
        else                                            \
                _val = read_sysreg(reg);                \
        _val;                                           \
index 0b6b1633017fc3f859d9375d30d83e5bae47bb2a..e7445281e5342bcd248cd9ab684b2747a52bd080 100644 (file)
@@ -50,6 +50,7 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
 
 #define efi_call_early(f, ...)         sys_table_arg->boottime->f(__VA_ARGS__)
 #define __efi_call_early(f, ...)       f(__VA_ARGS__)
+#define efi_call_runtime(f, ...)       sys_table_arg->runtime->f(__VA_ARGS__)
 #define efi_is_64bit()                 (true)
 
 #define efi_call_proto(protocol, f, instance, ...)                     \
index 23e9e13bd2aa797840fcd79e2d61e747ebbe42f6..655e65f38f315758b617d6aaa89f8feed63b496a 100644 (file)
@@ -11,6 +11,7 @@
  * for more details.
  */
 
+#include <linux/acpi.h>
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
 #include <linux/init.h>
@@ -209,7 +210,12 @@ static struct notifier_block init_cpu_capacity_notifier = {
 
 static int __init register_cpufreq_notifier(void)
 {
-       if (cap_parsing_failed)
+       /*
+        * on ACPI-based systems we need to use the default cpu capacity
+        * until we have the necessary code to parse the cpu capacity, so
+        * skip registering cpufreq notifier.
+        */
+       if (!acpi_disabled || cap_parsing_failed)
                return -EINVAL;
 
        if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
index e04082700bb16c3598333de72b3c64659be63e5e..4a14b25163fb067ee2c3e575a45a20a58481a434 100644 (file)
@@ -558,7 +558,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
                                 unsigned long attrs)
 {
        bool coherent = is_device_dma_coherent(dev);
-       int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
+       int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
        size_t iosize = size;
        void *addr;
 
@@ -712,7 +712,7 @@ static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
                                   unsigned long attrs)
 {
        bool coherent = is_device_dma_coherent(dev);
-       int prot = dma_direction_to_prot(dir, coherent);
+       int prot = dma_info_to_prot(dir, coherent, attrs);
        dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);
 
        if (!iommu_dma_mapping_error(dev, dev_addr) &&
@@ -770,7 +770,7 @@ static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
                __iommu_sync_sg_for_device(dev, sgl, nelems, dir);
 
        return iommu_dma_map_sg(dev, sgl, nelems,
-                       dma_direction_to_prot(dir, coherent));
+                               dma_info_to_prot(dir, coherent, attrs));
 }
 
 static void __iommu_unmap_sg_attrs(struct device *dev,
@@ -799,7 +799,6 @@ static struct dma_map_ops iommu_dma_ops = {
        .sync_sg_for_device = __iommu_sync_sg_for_device,
        .map_resource = iommu_dma_map_resource,
        .unmap_resource = iommu_dma_unmap_resource,
-       .dma_supported = iommu_dma_supported,
        .mapping_error = iommu_dma_mapping_error,
 };
 
index 241b9b9729d821510fb2addde4b276e73e6185a7..3d7ef2c17a7c8d382cabcf1bdaad87534ec0c268 100644 (file)
@@ -1,6 +1,5 @@
 
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += delay.h
 generic-y += device.h
 generic-y += div64.h
index 2fb67b59d188dd2c29ff032d5647e1add6fecbe8..d6fa60b158be250010e8d71b72cb1a59338645b0 100644 (file)
@@ -2,7 +2,6 @@
 generic-y += auxvec.h
 generic-y += bitsperlong.h
 generic-y += bugs.h
-generic-y += cputime.h
 generic-y += current.h
 generic-y += device.h
 generic-y += div64.h
index 64465e7e224593a2e808f77f0fb6c735219d12a4..4e9f57433f3a405568f187cc6951e94318a65fac 100644 (file)
@@ -5,7 +5,6 @@ generic-y += barrier.h
 generic-y += bitsperlong.h
 generic-y += bugs.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += current.h
 generic-y += device.h
 generic-y += div64.h
index 1778805f63809d378a5cafb6c920b517ea753c7b..9f19e19bff9d39117871446675563f7f9fc6426c 100644 (file)
@@ -4,7 +4,6 @@ generic-y += barrier.h
 generic-y += bitsperlong.h
 generic-y += clkdev.h
 generic-y += cmpxchg.h
-generic-y += cputime.h
 generic-y += device.h
 generic-y += div64.h
 generic-y += errno.h
index 1fa084cf1a4398934889658b8b21f66154bf0ab6..0f5b0d5d313ccc122142c2491e3048312ae0025c 100644 (file)
@@ -1,6 +1,5 @@
 
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += exec.h
 generic-y += irq_work.h
 generic-y += mcs_spinlock.h
index 1c2a5e264fc71cfd52f2acb0b24ddb1aff792be7..e93c9494503ac8fc3cfaa8167ea3523abb3e2925 100644 (file)
@@ -139,7 +139,7 @@ static inline void atomic64_dec(atomic64_t *v)
 #define atomic64_sub_and_test(i,v)     (atomic64_sub_return((i), (v)) == 0)
 #define atomic64_dec_and_test(v)       (atomic64_dec_return((v)) == 0)
 #define atomic64_inc_and_test(v)       (atomic64_inc_return((v)) == 0)
-
+#define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1, 0)
 
 #define atomic_cmpxchg(v, old, new)    (cmpxchg(&(v)->counter, old, new))
 #define atomic_xchg(v, new)            (xchg(&(v)->counter, new))
@@ -161,6 +161,39 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
        return c;
 }
 
+static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
+{
+       long long c, old;
+
+       c = atomic64_read(v);
+       for (;;) {
+               if (unlikely(c == u))
+                       break;
+               old = atomic64_cmpxchg(v, c, c + i);
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != u;
+}
+
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+       long long c, old, dec;
+
+       c = atomic64_read(v);
+       for (;;) {
+               dec = c - 1;
+               if (unlikely(dec < 0))
+                       break;
+               old = atomic64_cmpxchg((v), c, dec);
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return dec;
+}
+
 #define ATOMIC_OP(op)                                                  \
 static inline int atomic_fetch_##op(int i, atomic_t *v)                        \
 {                                                                      \
index 373cb23301e30248bfd62f2a08c6529f93db0382..5efd0c87f3c0acf8f9fac4e8d36898f0a0191a2c 100644 (file)
@@ -5,7 +5,6 @@ generic-y += bugs.h
 generic-y += cacheflush.h
 generic-y += checksum.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += current.h
 generic-y += delay.h
 generic-y += device.h
index db8ddabc6bd2819ba579c4c407a569e2232daa51..a43a7c90e4af8a20ed9cae98b23af2471de25bea 100644 (file)
@@ -6,7 +6,6 @@ generic-y += barrier.h
 generic-y += bug.h
 generic-y += bugs.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += current.h
 generic-y += device.h
 generic-y += div64.h
index e2d3f5baf265408b49a201e8fa07e3b3b24fe939..3d665c0627a86606a60f533192f9bb52ec10f698 100644 (file)
 #ifndef __IA64_CPUTIME_H
 #define __IA64_CPUTIME_H
 
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-# include <asm-generic/cputime.h>
-#else
-# include <asm/processor.h>
-# include <asm-generic/cputime_nsecs.h>
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 extern void arch_vtime_task_switch(struct task_struct *tsk);
 #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 
index c7026429816be5d319462f2e9bdd1e373925f828..8742d741d19adcaaf0fe7ae8d757f6d9b568066b 100644 (file)
@@ -27,6 +27,12 @@ struct thread_info {
        mm_segment_t addr_limit;        /* user-level address space limit */
        int preempt_count;              /* 0=premptable, <0=BUG; will also serve as bh-counter */
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+       __u64 utime;
+       __u64 stime;
+       __u64 gtime;
+       __u64 hardirq_time;
+       __u64 softirq_time;
+       __u64 idle_time;
        __u64 ac_stamp;
        __u64 ac_leave;
        __u64 ac_stime;
index c9b5e942f67156f5b6b7cf4917658fbf85f1544f..3204fddc439c4ea5712f668e0f511842c239029e 100644 (file)
@@ -1031,7 +1031,7 @@ GLOBAL_ENTRY(ia64_native_sched_clock)
 END(ia64_native_sched_clock)
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-GLOBAL_ENTRY(cycle_to_cputime)
+GLOBAL_ENTRY(cycle_to_nsec)
        alloc r16=ar.pfs,1,0,0,0
        addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
        ;;
@@ -1047,7 +1047,7 @@ GLOBAL_ENTRY(cycle_to_cputime)
        ;;
        shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
        br.ret.sptk.many rp
-END(cycle_to_cputime)
+END(cycle_to_nsec)
 #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 
 #ifdef CONFIG_IA64_BRL_EMU
index 7ec7acc844c2e7aa36a1b3ed6acf5c51f5d18f99..c483ece3eb84c9b1ac3d271c7ffe43610a47931e 100644 (file)
@@ -619,6 +619,8 @@ setup_arch (char **cmdline_p)
        check_sal_cache_flush();
 #endif
        paging_init();
+
+       clear_sched_clock_stable();
 }
 
 /*
index 71775b95d6cc06cd9d8d8e9cadbf3fdb5333f967..faa116822c4c3ddcc8d74e5680e22dce9b7fa467 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/timex.h>
 #include <linux/timekeeper_internal.h>
 #include <linux/platform_device.h>
+#include <linux/cputime.h>
 
 #include <asm/machvec.h>
 #include <asm/delay.h>
@@ -59,18 +60,43 @@ static struct clocksource *itc_clocksource;
 
 #include <linux/kernel_stat.h>
 
-extern cputime_t cycle_to_cputime(u64 cyc);
+extern u64 cycle_to_nsec(u64 cyc);
 
-void vtime_account_user(struct task_struct *tsk)
+void vtime_flush(struct task_struct *tsk)
 {
-       cputime_t delta_utime;
        struct thread_info *ti = task_thread_info(tsk);
+       u64 delta;
 
-       if (ti->ac_utime) {
-               delta_utime = cycle_to_cputime(ti->ac_utime);
-               account_user_time(tsk, delta_utime);
-               ti->ac_utime = 0;
+       if (ti->utime)
+               account_user_time(tsk, cycle_to_nsec(ti->utime));
+
+       if (ti->gtime)
+               account_guest_time(tsk, cycle_to_nsec(ti->gtime));
+
+       if (ti->idle_time)
+               account_idle_time(cycle_to_nsec(ti->idle_time));
+
+       if (ti->stime) {
+               delta = cycle_to_nsec(ti->stime);
+               account_system_index_time(tsk, delta, CPUTIME_SYSTEM);
+       }
+
+       if (ti->hardirq_time) {
+               delta = cycle_to_nsec(ti->hardirq_time);
+               account_system_index_time(tsk, delta, CPUTIME_IRQ);
+       }
+
+       if (ti->softirq_time) {
+               delta = cycle_to_nsec(ti->softirq_time);
+               account_system_index_time(tsk, delta, CPUTIME_SOFTIRQ);
        }
+
+       ti->utime = 0;
+       ti->gtime = 0;
+       ti->idle_time = 0;
+       ti->stime = 0;
+       ti->hardirq_time = 0;
+       ti->softirq_time = 0;
 }
 
 /*
@@ -83,7 +109,7 @@ void arch_vtime_task_switch(struct task_struct *prev)
        struct thread_info *pi = task_thread_info(prev);
        struct thread_info *ni = task_thread_info(current);
 
-       pi->ac_stamp = ni->ac_stamp;
+       ni->ac_stamp = pi->ac_stamp;
        ni->ac_stime = ni->ac_utime = 0;
 }
 
@@ -91,18 +117,15 @@ void arch_vtime_task_switch(struct task_struct *prev)
  * Account time for a transition between system, hard irq or soft irq state.
  * Note that this function is called with interrupts enabled.
  */
-static cputime_t vtime_delta(struct task_struct *tsk)
+static __u64 vtime_delta(struct task_struct *tsk)
 {
        struct thread_info *ti = task_thread_info(tsk);
-       cputime_t delta_stime;
-       __u64 now;
+       __u64 now, delta_stime;
 
        WARN_ON_ONCE(!irqs_disabled());
 
        now = ia64_get_itc();
-
-       delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp));
-       ti->ac_stime = 0;
+       delta_stime = now - ti->ac_stamp;
        ti->ac_stamp = now;
 
        return delta_stime;
@@ -110,15 +133,25 @@ static cputime_t vtime_delta(struct task_struct *tsk)
 
 void vtime_account_system(struct task_struct *tsk)
 {
-       cputime_t delta = vtime_delta(tsk);
-
-       account_system_time(tsk, 0, delta);
+       struct thread_info *ti = task_thread_info(tsk);
+       __u64 stime = vtime_delta(tsk);
+
+       if ((tsk->flags & PF_VCPU) && !irq_count())
+               ti->gtime += stime;
+       else if (hardirq_count())
+               ti->hardirq_time += stime;
+       else if (in_serving_softirq())
+               ti->softirq_time += stime;
+       else
+               ti->stime += stime;
 }
 EXPORT_SYMBOL_GPL(vtime_account_system);
 
 void vtime_account_idle(struct task_struct *tsk)
 {
-       account_idle_time(vtime_delta(tsk));
+       struct thread_info *ti = task_thread_info(tsk);
+
+       ti->idle_time += vtime_delta(tsk);
 }
 
 #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
index 860e440611c98f7e0c2a9882c18dc73cf166f35e..652100b64a7159c4e530b625d1cfaf391cff6ea7 100644 (file)
@@ -1,6 +1,5 @@
 
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += exec.h
 generic-y += irq_work.h
 generic-y += kvm_para.h
index e53caf4c3bfbf9d141e1f49857ce08f8da785cc2..419751b15ec8b5f1909636f4751789afdb23ac13 100644 (file)
@@ -45,9 +45,9 @@ void m68328_reset (void)
 
 void __init config_BSP(char *command, int len)
 {
-  printk(KERN_INFO "\n68328 support D. Jeff Dionne <jeff@uclinux.org>\n");
-  printk(KERN_INFO "68328 support Kenneth Albanowski <kjahds@kjshds.com>\n");
-  printk(KERN_INFO "68328/Pilot support Bernhard Kuhn <kuhn@lpr.e-technik.tu-muenchen.de>\n");
+  pr_info("68328 support D. Jeff Dionne <jeff@uclinux.org>\n");
+  pr_info("68328 support Kenneth Albanowski <kjahds@kjshds.com>\n");
+  pr_info("68328/Pilot support Bernhard Kuhn <kuhn@lpr.e-technik.tu-muenchen.de>\n");
 
   mach_hwclk = m68328_hwclk;
   mach_reset = m68328_reset;
index e6ab321f93f832e9b9e070f8da4aa00f856b82d5..6a309a3cfbfc1953b5baaec11370d231477898ea 100644 (file)
@@ -57,12 +57,12 @@ void __init config_BSP(char *command, int len)
 {
   unsigned char *p;
 
-  printk(KERN_INFO "\n68EZ328 DragonBallEZ support (C) 1999 Rt-Control, Inc\n");
+  pr_info("68EZ328 DragonBallEZ support (C) 1999 Rt-Control, Inc\n");
 
 #ifdef CONFIG_UCSIMM
-  printk(KERN_INFO "uCsimm serial string [%s]\n",getserialnum());
+  pr_info("uCsimm serial string [%s]\n", getserialnum());
   p = cs8900a_hwaddr = gethwaddr(0);
-  printk(KERN_INFO "uCsimm hwaddr %pM\n", p);
+  pr_info("uCsimm hwaddr %pM\n", p);
 
   p = getbenv("APPEND");
   if (p) strcpy(p,command);
index 1154bdb220a022cdf2d850138a9e74e8dec74bdf..81b5491685a4a2ab0ba72933fefaf8f72739ec1a 100644 (file)
@@ -150,9 +150,9 @@ static void __init init_hardware(char *command, int size)
 {
        char *p;
 
-       printk(KERN_INFO "uCdimm serial string [%s]\n", getserialnum());
+       pr_info("uCdimm serial string [%s]\n", getserialnum());
        p = cs8900a_hwaddr = gethwaddr(0);
-       printk(KERN_INFO "uCdimm hwaddr %pM\n", p);
+       pr_info("uCdimm hwaddr %pM\n", p);
        p = getbenv("APPEND");
        if (p)
                strcpy(p, command);
@@ -177,7 +177,7 @@ static void __init init_hardware(char *command, int size)
 
 void __init config_BSP(char *command, int size)
 {
-       printk(KERN_INFO "68VZ328 DragonBallVZ support (c) 2001 Lineo, Inc.\n");
+       pr_info("68VZ328 DragonBallVZ support (c) 2001 Lineo, Inc.\n");
 
        init_hardware(command, size);
 
index 264db11268039329266ef1a910e8de964f396a72..37091898adb3d3b54249df3f50558337d72ce751 100644 (file)
@@ -149,7 +149,7 @@ repeat:
        if (acia_stat & ACIA_OVRN) {
                /* a very fast typist or a slow system, give a warning */
                /* ...happens often if interrupts were disabled for too long */
-               printk(KERN_DEBUG "Keyboard overrun\n");
+               pr_debug("Keyboard overrun\n");
                scancode = acia.key_data;
                if (ikbd_self_test)
                        /* During self test, don't do resyncing, just process the code */
@@ -228,14 +228,14 @@ repeat:
                                        keytyp = KTYP(keyval) - 0xf0;
                                        keyval = KVAL(keyval);
 
-                                       printk(KERN_WARNING "Key with scancode %d ", scancode);
+                                       pr_warn("Key with scancode %d ", scancode);
                                        if (keytyp == KT_LATIN || keytyp == KT_LETTER) {
                                                if (keyval < ' ')
-                                                       printk("('^%c') ", keyval + '@');
+                                                       pr_cont("('^%c') ", keyval + '@');
                                                else
-                                                       printk("('%c') ", keyval);
+                                                       pr_cont("('%c') ", keyval);
                                        }
-                                       printk("is broken -- will be ignored.\n");
+                                       pr_cont("is broken -- will be ignored.\n");
                                        break;
                                } else if (test_bit(scancode, broken_keys))
                                        break;
@@ -299,7 +299,7 @@ repeat:
 #endif
 
        if (acia_stat & (ACIA_FE | ACIA_PE)) {
-               printk("Error in keyboard communication\n");
+               pr_err("Error in keyboard communication\n");
        }
 
        /* handle_scancode() can take a lot of time, so check again if
@@ -553,7 +553,7 @@ int atari_keyb_init(void)
                barrier();
        /* if not incremented: no 0xf1 received */
        if (ikbd_self_test == 1)
-               printk(KERN_ERR "WARNING: keyboard self test failed!\n");
+               pr_err("Keyboard self test failed!\n");
        ikbd_self_test = 0;
 
        ikbd_mouse_disable();
index e328eaf816e372ff07f8e0d5dc214aaf9ee18f88..565c6f06ab0b8193ef02f8028d6a8baf8ff00b06 100644 (file)
@@ -234,44 +234,44 @@ void __init config_atari(void)
         * Determine hardware present
         */
 
-       printk("Atari hardware found: ");
+       pr_info("Atari hardware found:");
        if (MACH_IS_MEDUSA) {
                /* There's no Atari video hardware on the Medusa, but all the
                 * addresses below generate a DTACK so no bus error occurs! */
        } else if (hwreg_present(f030_xreg)) {
                ATARIHW_SET(VIDEL_SHIFTER);
-               printk("VIDEL ");
+               pr_cont(" VIDEL");
                /* This is a temporary hack: If there is Falcon video
                 * hardware, we assume that the ST-DMA serves SCSI instead of
                 * ACSI. In the future, there should be a better method for
                 * this...
                 */
                ATARIHW_SET(ST_SCSI);
-               printk("STDMA-SCSI ");
+               pr_cont(" STDMA-SCSI");
        } else if (hwreg_present(tt_palette)) {
                ATARIHW_SET(TT_SHIFTER);
-               printk("TT_SHIFTER ");
+               pr_cont(" TT_SHIFTER");
        } else if (hwreg_present(&shifter.bas_hi)) {
                if (hwreg_present(&shifter.bas_lo) &&
                    (shifter.bas_lo = 0x0aau, shifter.bas_lo == 0x0aau)) {
                        ATARIHW_SET(EXTD_SHIFTER);
-                       printk("EXTD_SHIFTER ");
+                       pr_cont(" EXTD_SHIFTER");
                } else {
                        ATARIHW_SET(STND_SHIFTER);
-                       printk("STND_SHIFTER ");
+                       pr_cont(" STND_SHIFTER");
                }
        }
        if (hwreg_present(&st_mfp.par_dt_reg)) {
                ATARIHW_SET(ST_MFP);
-               printk("ST_MFP ");
+               pr_cont(" ST_MFP");
        }
        if (hwreg_present(&tt_mfp.par_dt_reg)) {
                ATARIHW_SET(TT_MFP);
-               printk("TT_MFP ");
+               pr_cont(" TT_MFP");
        }
        if (hwreg_present(&tt_scsi_dma.dma_addr_hi)) {
                ATARIHW_SET(SCSI_DMA);
-               printk("TT_SCSI_DMA ");
+               pr_cont(" TT_SCSI_DMA");
        }
        /*
         * The ST-DMA address registers aren't readable
@@ -284,27 +284,27 @@ void __init config_atari(void)
             (st_dma.dma_vhi = 0xaa) && (st_dma.dma_hi = 0x55) &&
             st_dma.dma_vhi == 0xaa && st_dma.dma_hi == 0x55)) {
                ATARIHW_SET(EXTD_DMA);
-               printk("EXTD_DMA ");
+               pr_cont(" EXTD_DMA");
        }
        if (hwreg_present(&tt_scsi.scsi_data)) {
                ATARIHW_SET(TT_SCSI);
-               printk("TT_SCSI ");
+               pr_cont(" TT_SCSI");
        }
        if (hwreg_present(&sound_ym.rd_data_reg_sel)) {
                ATARIHW_SET(YM_2149);
-               printk("YM2149 ");
+               pr_cont(" YM2149");
        }
        if (!MACH_IS_MEDUSA && hwreg_present(&tt_dmasnd.ctrl)) {
                ATARIHW_SET(PCM_8BIT);
-               printk("PCM ");
+               pr_cont(" PCM");
        }
        if (hwreg_present(&falcon_codec.unused5)) {
                ATARIHW_SET(CODEC);
-               printk("CODEC ");
+               pr_cont(" CODEC");
        }
        if (hwreg_present(&dsp56k_host_interface.icr)) {
                ATARIHW_SET(DSP56K);
-               printk("DSP56K ");
+               pr_cont(" DSP56K");
        }
        if (hwreg_present(&tt_scc_dma.dma_ctrl) &&
 #if 0
@@ -316,33 +316,33 @@ void __init config_atari(void)
 #endif
            ) {
                ATARIHW_SET(SCC_DMA);
-               printk("SCC_DMA ");
+               pr_cont(" SCC_DMA");
        }
        if (scc_test(&atari_scc.cha_a_ctrl)) {
                ATARIHW_SET(SCC);
-               printk("SCC ");
+               pr_cont(" SCC");
        }
        if (scc_test(&st_escc.cha_b_ctrl)) {
                ATARIHW_SET(ST_ESCC);
-               printk("ST_ESCC ");
+               pr_cont(" ST_ESCC");
        }
        if (hwreg_present(&tt_scu.sys_mask)) {
                ATARIHW_SET(SCU);
                /* Assume a VME bus if there's a SCU */
                ATARIHW_SET(VME);
-               printk("VME SCU ");
+               pr_cont(" VME SCU");
        }
        if (hwreg_present((void *)(0xffff9210))) {
                ATARIHW_SET(ANALOG_JOY);
-               printk("ANALOG_JOY ");
+               pr_cont(" ANALOG_JOY");
        }
        if (hwreg_present(blitter.halftone)) {
                ATARIHW_SET(BLITTER);
-               printk("BLITTER ");
+               pr_cont(" BLITTER");
        }
        if (hwreg_present((void *)0xfff00039)) {
                ATARIHW_SET(IDE);
-               printk("IDE ");
+               pr_cont(" IDE");
        }
 #if 1 /* This maybe wrong */
        if (!MACH_IS_MEDUSA && hwreg_present(&tt_microwire.data) &&
@@ -355,31 +355,31 @@ void __init config_atari(void)
                ATARIHW_SET(MICROWIRE);
                while (tt_microwire.mask != 0x7ff)
                        ;
-               printk("MICROWIRE ");
+               pr_cont(" MICROWIRE");
        }
 #endif
        if (hwreg_present(&tt_rtc.regsel)) {
                ATARIHW_SET(TT_CLK);
-               printk("TT_CLK ");
+               pr_cont(" TT_CLK");
                mach_hwclk = atari_tt_hwclk;
                mach_set_clock_mmss = atari_tt_set_clock_mmss;
        }
        if (hwreg_present(&mste_rtc.sec_ones)) {
                ATARIHW_SET(MSTE_CLK);
-               printk("MSTE_CLK ");
+               pr_cont(" MSTE_CLK");
                mach_hwclk = atari_mste_hwclk;
                mach_set_clock_mmss = atari_mste_set_clock_mmss;
        }
        if (!MACH_IS_MEDUSA && hwreg_present(&dma_wd.fdc_speed) &&
            hwreg_write(&dma_wd.fdc_speed, 0)) {
                ATARIHW_SET(FDCSPEED);
-               printk("FDC_SPEED ");
+               pr_cont(" FDC_SPEED");
        }
        if (!ATARIHW_PRESENT(ST_SCSI)) {
                ATARIHW_SET(ACSI);
-               printk("ACSI ");
+               pr_cont(" ACSI");
        }
-       printk("\n");
+       pr_cont("\n");
 
        if (CPU_IS_040_OR_060)
                /* Now it seems to be safe to turn of the tt0 transparent
index 611d4d9ea2bd80aca5cdbf8425fcdbe4c2e8f67b..2cfff47650407479cae907205df89ea039934a37 100644 (file)
@@ -63,8 +63,8 @@ void bvme6000_reset(void)
 {
        volatile PitRegsPtr pit = (PitRegsPtr)BVME_PIT_BASE;
 
-       printk ("\r\n\nCalled bvme6000_reset\r\n"
-                       "\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r");
+       pr_info("\r\n\nCalled bvme6000_reset\r\n"
+               "\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r");
        /* The string of returns is to delay the reset until the whole
         * message is output. */
        /* Enable the watchdog, via PIT port C bit 4 */
@@ -117,8 +117,8 @@ void __init config_bvme6000(void)
     mach_reset          = bvme6000_reset;
     mach_get_model       = bvme6000_get_model;
 
-    printk ("Board is %sconfigured as a System Controller\n",
-               *config_reg_ptr & BVME_CONFIG_SW1 ? "" : "not ");
+    pr_info("Board is %sconfigured as a System Controller\n",
+           *config_reg_ptr & BVME_CONFIG_SW1 ? "" : "not ");
 
     /* Now do the PIT configuration */
 
index d53c9b301f84af754677190bcd021f50595bd3af..e4f1faffe32bad16b950dfd51037077d6df372eb 100644 (file)
@@ -168,7 +168,7 @@ static int __init rtc_DP8570A_init(void)
        if (!MACH_IS_BVME6000)
                return -ENODEV;
 
-       printk(KERN_INFO "DP8570A Real Time Clock Driver v%s\n", RTC_VERSION);
+       pr_info("DP8570A Real Time Clock Driver v%s\n", RTC_VERSION);
        return misc_register(&rtc_dev);
 }
 module_init(rtc_DP8570A_init);
index b98acd15ca22d59fce754a93ee44c4d5619804f8..048bf076f7df66a35fd4d11addd015e9ec285fc9 100644 (file)
@@ -66,6 +66,7 @@ CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_INET_RAW_DIAG=m
 CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
@@ -76,10 +77,10 @@ CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_LOG_NETDEV=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
-CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
@@ -95,6 +96,7 @@ CONFIG_NF_TABLES_INET=m
 CONFIG_NF_TABLES_NETDEV=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
+CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
@@ -105,11 +107,13 @@ CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
 CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
+CONFIG_NFT_OBJREF=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_QUOTA=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
+CONFIG_NFT_FIB_INET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
@@ -176,6 +180,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_IPMAC=m
 CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
@@ -184,8 +189,10 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
+CONFIG_NFT_FIB_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -212,8 +219,10 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_DUP_IPV6=m
+CONFIG_NFT_FIB_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -294,6 +303,7 @@ CONFIG_NET_DEVLINK=m
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_TEST_ASYNC_DRIVER_PROBE=m
 CONFIG_CONNECTOR=m
 CONFIG_PARPORT=m
 CONFIG_PARPORT_AMIGA=m
@@ -369,6 +379,7 @@ CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
 # CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 CONFIG_A2065=y
 CONFIG_ARIADNE=y
@@ -390,6 +401,7 @@ CONFIG_ZORRO8390=y
 # CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_SYNOPSYS is not set
@@ -421,7 +433,6 @@ CONFIG_INPUT_MISC=y
 CONFIG_INPUT_M68K_BEEP=m
 # CONFIG_SERIO is not set
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_PRINTER=m
 # CONFIG_HW_RANDOM is not set
 CONFIG_NTP_PPS=y
@@ -569,6 +580,7 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
+CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
index f80dc57e6374d3cf5e001eb29ca1ed8adb3b71d0..d4de24963f5f7434e5fab612629c23149fc0389c 100644 (file)
@@ -64,6 +64,7 @@ CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_INET_RAW_DIAG=m
 CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
@@ -74,10 +75,10 @@ CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_LOG_NETDEV=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
-CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
@@ -93,6 +94,7 @@ CONFIG_NF_TABLES_INET=m
 CONFIG_NF_TABLES_NETDEV=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
+CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
@@ -103,11 +105,13 @@ CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
 CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
+CONFIG_NFT_OBJREF=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_QUOTA=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
+CONFIG_NFT_FIB_INET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
@@ -174,6 +178,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_IPMAC=m
 CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
@@ -182,8 +187,10 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
+CONFIG_NFT_FIB_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -210,8 +217,10 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_DUP_IPV6=m
+CONFIG_NFT_FIB_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -292,6 +301,7 @@ CONFIG_NET_DEVLINK=m
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_TEST_ASYNC_DRIVER_PROBE=m
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -350,6 +360,7 @@ CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
+# CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
@@ -365,6 +376,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
@@ -391,7 +403,6 @@ CONFIG_MOUSE_SERIAL=m
 CONFIG_SERIO=m
 CONFIG_USERIO=m
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 # CONFIG_HW_RANDOM is not set
 CONFIG_NTP_PPS=y
 CONFIG_PPS_CLIENT_LDISC=m
@@ -528,6 +539,7 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
+CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
index 4e16b1821fbbf0d89c91b3d0fd16b419aef29307..fc0fd3f871f3348233c720465d2707fd97c8d94f 100644 (file)
@@ -64,6 +64,7 @@ CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_INET_RAW_DIAG=m
 CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
@@ -74,10 +75,10 @@ CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_LOG_NETDEV=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
-CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
@@ -93,6 +94,7 @@ CONFIG_NF_TABLES_INET=m
 CONFIG_NF_TABLES_NETDEV=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
+CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
@@ -103,11 +105,13 @@ CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
 CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
+CONFIG_NFT_OBJREF=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_QUOTA=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
+CONFIG_NFT_FIB_INET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
@@ -174,6 +178,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_IPMAC=m
 CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
@@ -182,8 +187,10 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
+CONFIG_NFT_FIB_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -210,8 +217,10 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_DUP_IPV6=m
+CONFIG_NFT_FIB_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -292,6 +301,7 @@ CONFIG_NET_DEVLINK=m
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_TEST_ASYNC_DRIVER_PROBE=m
 CONFIG_CONNECTOR=m
 CONFIG_PARPORT=m
 CONFIG_PARPORT_ATARI=m
@@ -359,6 +369,7 @@ CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
+# CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 CONFIG_ATARILANCE=y
 # CONFIG_NET_VENDOR_ARC is not set
@@ -375,6 +386,7 @@ CONFIG_NE2000=y
 # CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
 CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_SYNOPSYS is not set
@@ -404,7 +416,6 @@ CONFIG_INPUT_MISC=y
 CONFIG_INPUT_M68K_BEEP=m
 # CONFIG_SERIO is not set
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_PRINTER=m
 # CONFIG_HW_RANDOM is not set
 CONFIG_NTP_PPS=y
@@ -549,6 +560,7 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
+CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
index 2767bbf5ad61c18afc264590155baa789f3b427e..52e984a0aa696a503f458f2dd853913a1f32a52b 100644 (file)
@@ -62,6 +62,7 @@ CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_INET_RAW_DIAG=m
 CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
@@ -72,10 +73,10 @@ CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_LOG_NETDEV=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
-CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
@@ -91,6 +92,7 @@ CONFIG_NF_TABLES_INET=m
 CONFIG_NF_TABLES_NETDEV=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
+CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
@@ -101,11 +103,13 @@ CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
 CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
+CONFIG_NFT_OBJREF=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_QUOTA=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
+CONFIG_NFT_FIB_INET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
@@ -172,6 +176,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_IPMAC=m
 CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
@@ -180,8 +185,10 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
+CONFIG_NFT_FIB_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -208,8 +215,10 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_DUP_IPV6=m
+CONFIG_NFT_FIB_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -290,6 +299,7 @@ CONFIG_NET_DEVLINK=m
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_TEST_ASYNC_DRIVER_PROBE=m
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -349,6 +359,7 @@ CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
+# CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
@@ -364,6 +375,7 @@ CONFIG_BVME6000_NET=y
 # CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
@@ -389,7 +401,6 @@ CONFIG_INPUT_EVDEV=m
 # CONFIG_SERIO is not set
 CONFIG_VT_HW_CONSOLE_BINDING=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 # CONFIG_HW_RANDOM is not set
 CONFIG_NTP_PPS=y
 CONFIG_PPS_CLIENT_LDISC=m
@@ -520,6 +531,7 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
+CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
index d13ba309265e5d979d607ba02bbbdcf7bf0d7d71..aaeed4422cc97525600537135669e9da3f865b3e 100644 (file)
@@ -64,6 +64,7 @@ CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_INET_RAW_DIAG=m
 CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
@@ -74,10 +75,10 @@ CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_LOG_NETDEV=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
-CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
@@ -93,6 +94,7 @@ CONFIG_NF_TABLES_INET=m
 CONFIG_NF_TABLES_NETDEV=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
+CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
@@ -103,11 +105,13 @@ CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
 CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
+CONFIG_NFT_OBJREF=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_QUOTA=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
+CONFIG_NFT_FIB_INET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
@@ -174,6 +178,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_IPMAC=m
 CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
@@ -182,8 +187,10 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
+CONFIG_NFT_FIB_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -210,8 +217,10 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_DUP_IPV6=m
+CONFIG_NFT_FIB_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -292,6 +301,7 @@ CONFIG_NET_DEVLINK=m
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_TEST_ASYNC_DRIVER_PROBE=m
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -350,6 +360,7 @@ CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
+# CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 CONFIG_HPLANCE=y
 # CONFIG_NET_VENDOR_ARC is not set
@@ -366,6 +377,7 @@ CONFIG_HPLANCE=y
 # CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
@@ -394,7 +406,6 @@ CONFIG_HP_SDC_RTC=m
 CONFIG_SERIO_SERPORT=m
 CONFIG_USERIO=m
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 # CONFIG_HW_RANDOM is not set
 CONFIG_NTP_PPS=y
 CONFIG_PPS_CLIENT_LDISC=m
@@ -530,6 +541,7 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
+CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
index 78b5101c1aa63476ffe8294927385328f411af01..3bbc9b2f0dac0fb890183d11d369655fdfd68201 100644 (file)
@@ -63,6 +63,7 @@ CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_INET_RAW_DIAG=m
 CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
@@ -73,10 +74,10 @@ CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_LOG_NETDEV=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
-CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
@@ -92,6 +93,7 @@ CONFIG_NF_TABLES_INET=m
 CONFIG_NF_TABLES_NETDEV=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
+CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
@@ -102,11 +104,13 @@ CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
 CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
+CONFIG_NFT_OBJREF=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_QUOTA=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
+CONFIG_NFT_FIB_INET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
@@ -173,6 +177,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_IPMAC=m
 CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
@@ -181,8 +186,10 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
+CONFIG_NFT_FIB_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -209,8 +216,10 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_DUP_IPV6=m
+CONFIG_NFT_FIB_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -294,6 +303,7 @@ CONFIG_NET_DEVLINK=m
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_TEST_ASYNC_DRIVER_PROBE=m
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_SWIM=m
 CONFIG_BLK_DEV_LOOP=y
@@ -366,6 +376,7 @@ CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
+# CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 CONFIG_MACMACE=y
 # CONFIG_NET_VENDOR_ARC is not set
@@ -384,6 +395,7 @@ CONFIG_MAC8390=y
 # CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_SYNOPSYS is not set
@@ -413,7 +425,6 @@ CONFIG_INPUT_M68K_BEEP=m
 CONFIG_SERIO=m
 CONFIG_USERIO=m
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_PMACZILOG=y
 CONFIG_SERIAL_PMACZILOG_TTYS=y
 CONFIG_SERIAL_PMACZILOG_CONSOLE=y
@@ -552,6 +563,7 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
+CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
index 38e5bcbd0d62eaf42c7f0d6055446efbe45fa13f..8f2c0decb2f8edd8030ffc1df2eddad2bd70a939 100644 (file)
@@ -73,6 +73,7 @@ CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_INET_RAW_DIAG=m
 CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
@@ -83,10 +84,10 @@ CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_LOG_NETDEV=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
-CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
@@ -102,6 +103,7 @@ CONFIG_NF_TABLES_INET=m
 CONFIG_NF_TABLES_NETDEV=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
+CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
@@ -112,11 +114,13 @@ CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
 CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
+CONFIG_NFT_OBJREF=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_QUOTA=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
+CONFIG_NFT_FIB_INET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
@@ -183,6 +187,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_IPMAC=m
 CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
@@ -191,8 +196,10 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
+CONFIG_NFT_FIB_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -219,8 +226,10 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_DUP_IPV6=m
+CONFIG_NFT_FIB_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -304,6 +313,7 @@ CONFIG_NET_DEVLINK=m
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_TEST_ASYNC_DRIVER_PROBE=m
 CONFIG_CONNECTOR=m
 CONFIG_PARPORT=m
 CONFIG_PARPORT_PC=m
@@ -400,6 +410,7 @@ CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
 # CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 CONFIG_A2065=y
 CONFIG_ARIADNE=y
@@ -430,6 +441,7 @@ CONFIG_ZORRO8390=y
 # CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
 CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_SYNOPSYS is not set
@@ -468,7 +480,6 @@ CONFIG_HP_SDC_RTC=m
 CONFIG_SERIO_Q40KBD=y
 CONFIG_USERIO=m
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_PMACZILOG=y
 CONFIG_SERIAL_PMACZILOG_TTYS=y
 CONFIG_SERIAL_PMACZILOG_CONSOLE=y
@@ -632,6 +643,7 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
+CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
index 28687192b68e4a74ba1d9b53f1732f7130b6be92..c743dd22e96f935f553a12648991546616a47291 100644 (file)
@@ -61,6 +61,7 @@ CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_INET_RAW_DIAG=m
 CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
@@ -71,10 +72,10 @@ CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_LOG_NETDEV=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
-CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
@@ -90,6 +91,7 @@ CONFIG_NF_TABLES_INET=m
 CONFIG_NF_TABLES_NETDEV=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
+CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
@@ -100,11 +102,13 @@ CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
 CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
+CONFIG_NFT_OBJREF=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_QUOTA=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
+CONFIG_NFT_FIB_INET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
@@ -171,6 +175,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_IPMAC=m
 CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
@@ -179,8 +184,10 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
+CONFIG_NFT_FIB_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -207,8 +214,10 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_DUP_IPV6=m
+CONFIG_NFT_FIB_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -289,6 +298,7 @@ CONFIG_NET_DEVLINK=m
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_TEST_ASYNC_DRIVER_PROBE=m
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -348,6 +358,7 @@ CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
+# CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 CONFIG_MVME147_NET=y
 # CONFIG_NET_VENDOR_ARC is not set
@@ -364,6 +375,7 @@ CONFIG_MVME147_NET=y
 # CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
@@ -389,7 +401,6 @@ CONFIG_INPUT_EVDEV=m
 # CONFIG_SERIO is not set
 CONFIG_VT_HW_CONSOLE_BINDING=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 # CONFIG_HW_RANDOM is not set
 CONFIG_NTP_PPS=y
 CONFIG_PPS_CLIENT_LDISC=m
@@ -520,6 +531,7 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
+CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
index 5a5f109ab3cdeb8ad992b2234f7e26a896039510..2ccaca858f0533d79d7f4c6a52bf070f333eb8ce 100644 (file)
@@ -62,6 +62,7 @@ CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_INET_RAW_DIAG=m
 CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
@@ -72,10 +73,10 @@ CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_LOG_NETDEV=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
-CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
@@ -91,6 +92,7 @@ CONFIG_NF_TABLES_INET=m
 CONFIG_NF_TABLES_NETDEV=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
+CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
@@ -101,11 +103,13 @@ CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
 CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
+CONFIG_NFT_OBJREF=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_QUOTA=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
+CONFIG_NFT_FIB_INET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
@@ -172,6 +176,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_IPMAC=m
 CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
@@ -180,8 +185,10 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
+CONFIG_NFT_FIB_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -208,8 +215,10 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_DUP_IPV6=m
+CONFIG_NFT_FIB_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -290,6 +299,7 @@ CONFIG_NET_DEVLINK=m
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_TEST_ASYNC_DRIVER_PROBE=m
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -349,6 +359,7 @@ CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
+# CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
@@ -364,6 +375,7 @@ CONFIG_MVME16x_NET=y
 # CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
@@ -389,7 +401,6 @@ CONFIG_INPUT_EVDEV=m
 # CONFIG_SERIO is not set
 CONFIG_VT_HW_CONSOLE_BINDING=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 # CONFIG_HW_RANDOM is not set
 CONFIG_NTP_PPS=y
 CONFIG_PPS_CLIENT_LDISC=m
@@ -520,6 +531,7 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
+CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
index e557c9de3fbce9c8b9b2b809610f59769d00a540..5599f3fd5fcd44eab2e52ac460b73376be5cc052 100644 (file)
@@ -62,6 +62,7 @@ CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_INET_RAW_DIAG=m
 CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
@@ -72,10 +73,10 @@ CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_LOG_NETDEV=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
-CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
@@ -91,6 +92,7 @@ CONFIG_NF_TABLES_INET=m
 CONFIG_NF_TABLES_NETDEV=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
+CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
@@ -101,11 +103,13 @@ CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
 CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
+CONFIG_NFT_OBJREF=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_QUOTA=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
+CONFIG_NFT_FIB_INET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
@@ -172,6 +176,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_IPMAC=m
 CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
@@ -180,8 +185,10 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
+CONFIG_NFT_FIB_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -208,8 +215,10 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_DUP_IPV6=m
+CONFIG_NFT_FIB_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -290,6 +299,7 @@ CONFIG_NET_DEVLINK=m
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_TEST_ASYNC_DRIVER_PROBE=m
 CONFIG_CONNECTOR=m
 CONFIG_PARPORT=m
 CONFIG_PARPORT_PC=m
@@ -356,6 +366,7 @@ CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
 # CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 # CONFIG_NET_VENDOR_AMD is not set
 # CONFIG_NET_VENDOR_ARC is not set
@@ -374,6 +385,7 @@ CONFIG_NE2000=y
 # CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_SYNOPSYS is not set
@@ -404,7 +416,6 @@ CONFIG_INPUT_M68K_BEEP=m
 CONFIG_SERIO_Q40KBD=y
 CONFIG_USERIO=m
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_PRINTER=m
 # CONFIG_HW_RANDOM is not set
 CONFIG_NTP_PPS=y
@@ -543,6 +554,7 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
+CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
index c6a748a36daf53798fcf8f3f9a61cbdca133fb46..313bf0a562ad33496735210197879fcf86739ea2 100644 (file)
@@ -59,6 +59,7 @@ CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_INET_RAW_DIAG=m
 CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
@@ -69,10 +70,10 @@ CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_LOG_NETDEV=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
-CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
@@ -88,6 +89,7 @@ CONFIG_NF_TABLES_INET=m
 CONFIG_NF_TABLES_NETDEV=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
+CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
@@ -98,11 +100,13 @@ CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
 CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
+CONFIG_NFT_OBJREF=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_QUOTA=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
+CONFIG_NFT_FIB_INET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
@@ -169,6 +173,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_IPMAC=m
 CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
@@ -177,8 +182,10 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
+CONFIG_NFT_FIB_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -205,8 +212,10 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_DUP_IPV6=m
+CONFIG_NFT_FIB_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -287,6 +296,7 @@ CONFIG_NET_DEVLINK=m
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_TEST_ASYNC_DRIVER_PROBE=m
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -346,6 +356,7 @@ CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
+# CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 CONFIG_SUN3LANCE=y
 # CONFIG_NET_VENDOR_ARC is not set
@@ -361,6 +372,7 @@ CONFIG_SUN3_82586=y
 # CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_SUN is not set
 # CONFIG_NET_VENDOR_SYNOPSYS is not set
@@ -388,7 +400,6 @@ CONFIG_KEYBOARD_SUNKBD=y
 CONFIG_MOUSE_SERIAL=m
 CONFIG_USERIO=m
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 # CONFIG_HW_RANDOM is not set
 CONFIG_NTP_PPS=y
 CONFIG_PPS_CLIENT_LDISC=m
@@ -521,6 +532,7 @@ CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
+CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
index 10d60857b9a691703f3e4297963abeb05dcc40fa..38b61365f769273f829980fa9cec2d9cc585e1f5 100644 (file)
@@ -59,6 +59,7 @@ CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_INET_RAW_DIAG=m
 CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
@@ -69,10 +70,10 @@ CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_LOG_NETDEV=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
-CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
@@ -88,6 +89,7 @@ CONFIG_NF_TABLES_INET=m
 CONFIG_NF_TABLES_NETDEV=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
+CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
@@ -98,11 +100,13 @@ CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
 CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
+CONFIG_NFT_OBJREF=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_QUOTA=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
+CONFIG_NFT_FIB_INET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
@@ -169,6 +173,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_IPMAC=m
 CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
@@ -177,8 +182,10 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
+CONFIG_NFT_FIB_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -205,8 +212,10 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_DUP_IPV6=m
+CONFIG_NFT_FIB_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -287,6 +296,7 @@ CONFIG_NET_DEVLINK=m
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_TEST_ASYNC_DRIVER_PROBE=m
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -346,6 +356,7 @@ CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
+# CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 CONFIG_SUN3LANCE=y
 # CONFIG_NET_VENDOR_ARC is not set
@@ -362,6 +373,7 @@ CONFIG_SUN3LANCE=y
 # CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
@@ -388,7 +400,6 @@ CONFIG_KEYBOARD_SUNKBD=y
 CONFIG_MOUSE_SERIAL=m
 CONFIG_USERIO=m
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 # CONFIG_HW_RANDOM is not set
 CONFIG_NTP_PPS=y
 CONFIG_PPS_CLIENT_LDISC=m
@@ -522,6 +533,7 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
+CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
index 1f2e5d31cb240997f0bbb64dfb5bd98ae4b4d33e..6c76d6c24b3d0d3f206d9c9600adf6eb040eed4f 100644 (file)
@@ -1,7 +1,6 @@
 generic-y += barrier.h
 generic-y += bitsperlong.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += device.h
 generic-y += emergency-restart.h
 generic-y += errno.h
index ef9a2e47352f0d8a03c55ccb2c32966b1e0d748f..5bc8d91d68d42bf0e046fd046e3f5c9a623a7df6 100644 (file)
@@ -6,12 +6,12 @@
 #ifdef CONFIG_DEBUG_BUGVERBOSE
 #ifndef CONFIG_SUN3
 #define BUG() do { \
-       printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+       pr_crit("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
        __builtin_trap(); \
 } while (0)
 #else
 #define BUG() do { \
-       printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+       pr_crit("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
        panic("BUG!"); \
 } while (0)
 #endif
index 47365b1ccbecfeb9c87f807ad5ab345649572114..c3b9ad6732fc68cc78a65db116372d7055318a6b 100644 (file)
@@ -234,9 +234,9 @@ asmlinkage irqreturn_t floppy_hardint(int irq, void *dev_id)
                virtual_dma_residue += virtual_dma_count;
                virtual_dma_count=0;
 #ifdef TRACE_FLPY_INT
-               printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
-                      virtual_dma_count, virtual_dma_residue, calls, bytes,
-                      dma_wait);
+               pr_info("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
+                       virtual_dma_count, virtual_dma_residue, calls, bytes,
+                       dma_wait);
                calls = 0;
                dma_wait=0;
 #endif
index 92aa8a4c2d03f3d1594da73ca85357528158f514..cddb2d3ea49b75e292d5fa8df032e83e89c538c8 100644 (file)
 
 #include <asm/irq.h>
 
-/* Setting this prints debugging info for unclaimed interrupts */
-
-#define DEBUG_SPURIOUS
-
-/* Setting this prints debugging info on each autovector interrupt */
-
-/* #define DEBUG_IRQS */
-
-/* Setting this prints debugging info on each Nubus interrupt */
-
-/* #define DEBUG_NUBUS_INT */
-
-/* Setting this prints debugging info on irqs as they enabled and disabled. */
-
-/* #define DEBUG_IRQUSE */
-
 /*
  * Base IRQ number for all Mac68K interrupt sources. Each source
  * has eight indexes (base -> base+7).
index 5e9249b0014ca0fcd2faa088bfb8f9d5810c003d..b062696d5a0d99aa3b9f42fe23977c704060d4e0 100644 (file)
@@ -105,21 +105,21 @@ struct fp_data {
 #ifdef FPU_EMU_DEBUG
 extern unsigned int fp_debugprint;
 
-#define dprint(bit, fmt, args...) ({                   \
+#define dprint(bit, fmt, ...) ({                       \
        if (fp_debugprint & (1 << (bit)))               \
-               printk(fmt, ## args);                   \
+               pr_info(fmt, ##__VA_ARGS__);            \
 })
 #else
-#define dprint(bit, fmt, args...)
+#define dprint(bit, fmt, ...)  no_printk(fmt, ##__VA_ARGS__)
 #endif
 
 #define uprint(str) ({                                 \
        static int __count = 3;                         \
                                                        \
        if (__count > 0) {                              \
-               printk("You just hit an unimplemented " \
+               pr_err("You just hit an unimplemented " \
                       "fpu instruction (%s)\n", str);  \
-               printk("Please report this to ....\n"); \
+               pr_err("Please report this to ....\n"); \
                __count--;                              \
        }                                               \
 })
index 48657f9fdeceac19911130401a20692385d5d496..d5104a7b5388535fbc830a3bdaa4344725876533 100644 (file)
@@ -151,11 +151,11 @@ static inline void pgd_clear (pgd_t *pgdp) {}
 
 
 #define pte_ERROR(e) \
-       printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+       pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
 #define pmd_ERROR(e) \
-       printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
+       pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
 #define pgd_ERROR(e) \
-       printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+       pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
 
 
 /*
index a02ea3a7bb20a0299e0f1bd2bf2306c484a4bc92..159269b7f2e8d61e55e283f94d7e55232a210dc3 100644 (file)
@@ -48,7 +48,7 @@ static unsigned char sun3x_82072_fd_inb(int port)
 //     udelay(5);
        switch(port & 7) {
        default:
-               printk("floppy: Asked to read unknown port %d\n", port);
+               pr_crit("floppy: Asked to read unknown port %d\n", port);
                panic("floppy: Port bolixed.");
        case 4: /* FD_STATUS */
                return (*sun3x_fdc.status_r) & ~STATUS_DMA;
@@ -70,7 +70,7 @@ static void sun3x_82072_fd_outb(unsigned char value, int port)
 //     udelay(5);
        switch(port & 7) {
        default:
-               printk("floppy: Asked to write to unknown port %d\n", port);
+               pr_crit("floppy: Asked to write to unknown port %d\n", port);
                panic("floppy: Port bolixed.");
        case 2: /* FD_DOR */
                /* Oh geese, 82072 on the Sun has no DOR register,
@@ -127,7 +127,7 @@ asmlinkage irqreturn_t sun3xflop_hardint(int irq, void *dev_id)
                return IRQ_HANDLED;
        }
 
-//     printk("doing pdma\n");// st %x\n", sun_fdc->status_82072);
+//     pr_info("doing pdma\n");// st %x\n", sun_fdc->status_82072);
 
 #ifdef TRACE_FLPY_INT
        if(!calls)
@@ -171,7 +171,7 @@ asmlinkage irqreturn_t sun3xflop_hardint(int irq, void *dev_id)
 #ifdef TRACE_FLPY_INT
        calls++;
 #endif
-//     printk("st=%02x\n", st);
+//     pr_info("st=%02x\n", st);
        if(st == 0x20)
                return IRQ_HANDLED;
        if(!(st & 0x20)) {
@@ -180,9 +180,9 @@ asmlinkage irqreturn_t sun3xflop_hardint(int irq, void *dev_id)
                doing_pdma = 0;
 
 #ifdef TRACE_FLPY_INT
-               printk("count=%x, residue=%x calls=%d bytes=%x dma_wait=%d\n",
-                      virtual_dma_count, virtual_dma_residue, calls, bytes,
-                      dma_wait);
+               pr_info("count=%x, residue=%x calls=%d bytes=%x dma_wait=%d\n",
+                       virtual_dma_count, virtual_dma_residue, calls, bytes,
+                       dma_wait);
                calls = 0;
                dma_wait=0;
 #endif
index 07070065a4256d6b237da811e552950d16e2e24c..1e4f386ba31e22bfe96182b7253b620504360077 100644 (file)
@@ -110,8 +110,8 @@ static void m68k_dma_sync_single_for_device(struct device *dev,
                cache_clear(handle, size);
                break;
        default:
-               if (printk_ratelimit())
-                       printk("dma_sync_single_for_device: unsupported dir %u\n", dir);
+               pr_err_ratelimited("dma_sync_single_for_device: unsupported dir %u\n",
+                                  dir);
                break;
        }
 }
index eb46fd6038cac73ab3b94b55d7c8e321f8b9ff00..aaac2da318ffd37751501fc80c79d90bdd4a7d68 100644 (file)
@@ -12,9 +12,9 @@
 #include <linux/kernel.h>
 
 #if 0
-#define DEBUGP printk
+#define DEBUGP(fmt, ...) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
 #else
-#define DEBUGP(fmt...)
+#define DEBUGP(fmt, ...) no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
 #endif
 
 #ifdef CONFIG_MODULES
@@ -51,8 +51,8 @@ int apply_relocate(Elf32_Shdr *sechdrs,
                        *location += sym->st_value - (uint32_t)location;
                        break;
                default:
-                       printk(KERN_ERR "module %s: Unknown relocation: %u\n",
-                              me->name, ELF32_R_TYPE(rel[i].r_info));
+                       pr_err("module %s: Unknown relocation: %u\n", me->name,
+                              ELF32_R_TYPE(rel[i].r_info));
                        return -ENOEXEC;
                }
        }
@@ -91,8 +91,8 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
                        *location = rel[i].r_addend + sym->st_value - (uint32_t)location;
                        break;
                default:
-                       printk(KERN_ERR "module %s: Unknown relocation: %u\n",
-                              me->name, ELF32_R_TYPE(rel[i].r_info));
+                       pr_err("module %s: Unknown relocation: %u\n", me->name,
+                              ELF32_R_TYPE(rel[i].r_info));
                        return -ENOEXEC;
                }
        }
index aaf28f8e342d27885f6dcbdca74d8f11b43d23af..f0a8e9b332cda75202b663b8ce81949c718c80da 100644 (file)
@@ -87,17 +87,17 @@ EXPORT_SYMBOL(pm_power_off);
 
 void show_regs(struct pt_regs * regs)
 {
-       printk("\n");
-       printk("Format %02x  Vector: %04x  PC: %08lx  Status: %04x    %s\n",
-              regs->format, regs->vector, regs->pc, regs->sr, print_tainted());
-       printk("ORIG_D0: %08lx  D0: %08lx  A2: %08lx  A1: %08lx\n",
-              regs->orig_d0, regs->d0, regs->a2, regs->a1);
-       printk("A0: %08lx  D5: %08lx  D4: %08lx\n",
-              regs->a0, regs->d5, regs->d4);
-       printk("D3: %08lx  D2: %08lx  D1: %08lx\n",
-              regs->d3, regs->d2, regs->d1);
+       pr_info("Format %02x  Vector: %04x  PC: %08lx  Status: %04x    %s\n",
+               regs->format, regs->vector, regs->pc, regs->sr,
+               print_tainted());
+       pr_info("ORIG_D0: %08lx  D0: %08lx  A2: %08lx  A1: %08lx\n",
+               regs->orig_d0, regs->d0, regs->a2, regs->a1);
+       pr_info("A0: %08lx  D5: %08lx  D4: %08lx\n", regs->a0, regs->d5,
+               regs->d4);
+       pr_info("D3: %08lx  D2: %08lx  D1: %08lx\n", regs->d3, regs->d2,
+               regs->d1);
        if (!(regs->sr & PS_S))
-               printk("USP: %08lx\n", rdusp());
+               pr_info("USP: %08lx\n", rdusp());
 }
 
 void flush_thread(void)
index 8ead291a902a346765fe995eee1e39ad1171e542..093b7c42fb85ae82c673cd93c71bf3f16e7123f2 100644 (file)
@@ -598,9 +598,7 @@ static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
                /*
                 * user process trying to return with weird frame format
                 */
-#ifdef DEBUG
-               printk("user process returning with weird frame format\n");
-#endif
+               pr_debug("user process returning with weird frame format\n");
                return 1;
        }
        if (!fsize) {
@@ -846,10 +844,8 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
        int err = 0, sig = ksig->sig;
 
        if (fsize < 0) {
-#ifdef DEBUG
-               printk ("setup_frame: Unknown frame format %#x\n",
-                       regs->format);
-#endif
+               pr_debug("setup_frame: Unknown frame format %#x\n",
+                        regs->format);
                return -EFAULT;
        }
 
@@ -905,9 +901,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
        if (regs->stkadj) {
                struct pt_regs *tregs =
                        (struct pt_regs *)((ulong)regs + regs->stkadj);
-#ifdef DEBUG
-               printk("Performing stackadjust=%04x\n", regs->stkadj);
-#endif
+               pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
                /* This must be copied with decreasing addresses to
                    handle overlaps.  */
                tregs->vector = 0;
@@ -926,10 +920,8 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
        int err = 0, sig = ksig->sig;
 
        if (fsize < 0) {
-#ifdef DEBUG
-               printk ("setup_frame: Unknown frame format %#x\n",
-                       regs->format);
-#endif
+               pr_debug("setup_frame: Unknown frame format %#x\n",
+                        regs->format);
                return -EFAULT;
        }
 
@@ -993,9 +985,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
        if (regs->stkadj) {
                struct pt_regs *tregs =
                        (struct pt_regs *)((ulong)regs + regs->stkadj);
-#ifdef DEBUG
-               printk("Performing stackadjust=%04x\n", regs->stkadj);
-#endif
+               pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
                /* This must be copied with decreasing addresses to
                    handle overlaps.  */
                tregs->vector = 0;
index 98a2daaae30cdb18526af2eda1d606040ac5ac37..933e4815dac8609baa213f495c7c0ec4005d3c7f 100644 (file)
@@ -398,7 +398,6 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
                 * Verify that the specified address region actually belongs
                 * to this process.
                 */
-               ret = -EINVAL;
                down_read(&current->mm->mmap_sem);
                vma = find_vma(current->mm, addr);
                if (!vma || addr < vma->vm_start || addr + len > vma->vm_end)
index b3536a82a26202b65d81b4c9d2cffc068c3a22c9..b29c3b241e1bb590eba500918e39e4ce9bc59796 100644 (file)
@@ -83,8 +83,7 @@ static void __init parse_uboot_commandline(char *commandp, int size)
                initrd_start = uboot_initrd_start;
                initrd_end = uboot_initrd_end;
                ROOT_DEV = Root_RAM0;
-               printk(KERN_INFO "initrd at 0x%lx:0x%lx\n",
-                       initrd_start, initrd_end);
+               pr_info("initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);
        }
 #endif /* if defined(CONFIG_BLK_DEV_INITRD) */
 }
index f6f7d42713ec8b95ad7aa19a4be08156cf261697..514acde3cd4005067adf3a0f7adddbc2334ea263 100644 (file)
@@ -14,8 +14,6 @@
 #include <asm/macints.h>
 #include <asm/mac_baboon.h>
 
-/* #define DEBUG_IRQS */
-
 int baboon_present;
 static volatile struct baboon *baboon;
 
@@ -50,12 +48,6 @@ static void baboon_irq(struct irq_desc *desc)
        int irq_bit, irq_num;
        unsigned char events;
 
-#ifdef DEBUG_IRQS
-       printk("baboon_irq: mb_control %02X mb_ifr %02X mb_status %02X\n",
-               (uint) baboon->mb_control, (uint) baboon->mb_ifr,
-               (uint) baboon->mb_status);
-#endif
-
        events = baboon->mb_ifr & 0x07;
        if (!events)
                return;
@@ -97,18 +89,10 @@ void __init baboon_register_interrupts(void)
 
 void baboon_irq_enable(int irq)
 {
-#ifdef DEBUG_IRQUSE
-       printk("baboon_irq_enable(%d)\n", irq);
-#endif
-
        mac_irq_enable(irq_get_irq_data(IRQ_NUBUS_C));
 }
 
 void baboon_irq_disable(int irq)
 {
-#ifdef DEBUG_IRQUSE
-       printk("baboon_irq_disable(%d)\n", irq);
-#endif
-
        mac_irq_disable(irq_get_irq_data(IRQ_NUBUS_C));
 }
index 9f98c08719010e272aa2f6f2464bc5b53f3a1427..b5cd06df71fd74970e9b16ef20212600ebb43658 100644 (file)
 #include <asm/hwtest.h>
 #include <asm/irq_regs.h>
 
-#define SHUTUP_SONIC
-
-/*
- * console_loglevel determines NMI handler function
- */
+extern void show_registers(struct pt_regs *);
 
 irqreturn_t mac_nmi_handler(int, void *);
-irqreturn_t mac_debug_handler(int, void *);
-
-/* #define DEBUG_MACINTS */
 
 static unsigned int mac_irq_startup(struct irq_data *);
 static void mac_irq_shutdown(struct irq_data *);
@@ -149,21 +142,8 @@ static struct irq_chip mac_irq_chip = {
 
 void __init mac_init_IRQ(void)
 {
-#ifdef DEBUG_MACINTS
-       printk("mac_init_IRQ(): Setting things up...\n");
-#endif
        m68k_setup_irq_controller(&mac_irq_chip, handle_simple_irq, IRQ_USER,
                                  NUM_MAC_SOURCES - IRQ_USER);
-       /* Make sure the SONIC interrupt is cleared or things get ugly */
-#ifdef SHUTUP_SONIC
-       printk("Killing onboard sonic... ");
-       /* This address should hopefully be mapped already */
-       if (hwreg_present((void*)(0x50f0a000))) {
-               *(long *)(0x50f0a014) = 0x7fffL;
-               *(long *)(0x50f0a010) = 0L;
-       }
-       printk("Done.\n");
-#endif /* SHUTUP_SONIC */
 
        /*
         * Now register the handlers for the master IRQ handlers
@@ -182,9 +162,6 @@ void __init mac_init_IRQ(void)
        if (request_irq(IRQ_AUTO_7, mac_nmi_handler, 0, "NMI",
                        mac_nmi_handler))
                pr_err("Couldn't register NMI\n");
-#ifdef DEBUG_MACINTS
-       printk("mac_init_IRQ(): Done!\n");
-#endif
 }
 
 /*
@@ -276,65 +253,17 @@ static void mac_irq_shutdown(struct irq_data *data)
                mac_irq_disable(data);
 }
 
-static int num_debug[8];
-
-irqreturn_t mac_debug_handler(int irq, void *dev_id)
-{
-       if (num_debug[irq] < 10) {
-               printk("DEBUG: Unexpected IRQ %d\n", irq);
-               num_debug[irq]++;
-       }
-       return IRQ_HANDLED;
-}
-
-static int in_nmi;
-static volatile int nmi_hold;
+static volatile int in_nmi;
 
 irqreturn_t mac_nmi_handler(int irq, void *dev_id)
 {
-       int i;
-       /*
-        * generate debug output on NMI switch if 'debug' kernel option given
-        * (only works with Penguin!)
-        */
+       if (in_nmi)
+               return IRQ_HANDLED;
+       in_nmi = 1;
 
-       in_nmi++;
-       for (i=0; i<100; i++)
-               udelay(1000);
-
-       if (in_nmi == 1) {
-               nmi_hold = 1;
-               printk("... pausing, press NMI to resume ...");
-       } else {
-               printk(" ok!\n");
-               nmi_hold = 0;
-       }
+       pr_info("Non-Maskable Interrupt\n");
+       show_registers(get_irq_regs());
 
-       barrier();
-
-       while (nmi_hold == 1)
-               udelay(1000);
-
-       if (console_loglevel >= 8) {
-#if 0
-               struct pt_regs *fp = get_irq_regs();
-               show_state();
-               printk("PC: %08lx\nSR: %04x  SP: %p\n", fp->pc, fp->sr, fp);
-               printk("d0: %08lx    d1: %08lx    d2: %08lx    d3: %08lx\n",
-                      fp->d0, fp->d1, fp->d2, fp->d3);
-               printk("d4: %08lx    d5: %08lx    a0: %08lx    a1: %08lx\n",
-                      fp->d4, fp->d5, fp->a0, fp->a1);
-
-               if (STACK_MAGIC != *(unsigned long *)current->kernel_stack_page)
-                       printk("Corrupted stack page\n");
-               printk("Process %s (pid: %d, stackpage=%08lx)\n",
-                       current->comm, current->pid, current->kernel_stack_page);
-               if (intr_count == 1)
-                       dump_stack((struct frame *)fp);
-#else
-               /* printk("NMI "); */
-#endif
-       }
-       in_nmi--;
+       in_nmi = 0;
        return IRQ_HANDLED;
 }
index c6d351f5bd79aacd8faa7a091fb3b24325547635..3b1f7a6159f8ce414fc57ef72985eac8804280c7 100644 (file)
@@ -4,7 +4,6 @@
 
 #include <linux/types.h>
 #include <linux/errno.h>
-#include <linux/miscdevice.h>
 #include <linux/kernel.h>
 #include <linux/delay.h>
 #include <linux/sched.h>
index 55d6592783f55710d0b23cae075f657252826650..ca84dcf41fc971dd5518b2f67bf6b56e2cce8f62 100644 (file)
@@ -68,15 +68,6 @@ static void oss_irq(struct irq_desc *desc)
        int events = oss->irq_pending &
                (OSS_IP_IOPSCC | OSS_IP_SCSI | OSS_IP_IOPISM);
 
-#ifdef DEBUG_IRQS
-       if ((console_loglevel == 10) && !(events & OSS_IP_SCSI)) {
-               unsigned int irq = irq_desc_get_irq(desc);
-
-               printk("oss_irq: irq %u events = 0x%04X\n", irq,
-                       (int) oss->irq_pending);
-       }
-#endif
-
        if (events & OSS_IP_IOPSCC) {
                oss->irq_pending &= ~OSS_IP_IOPSCC;
                generic_handle_irq(IRQ_MAC_SCC);
@@ -107,11 +98,6 @@ static void oss_nubus_irq(struct irq_desc *desc)
        if (!events)
                return;
 
-#ifdef DEBUG_NUBUS_INT
-       if (console_loglevel > 7) {
-               printk("oss_nubus_irq: events = 0x%04X\n", events);
-       }
-#endif
        /* There are only six slots on the OSS, not seven */
 
        i = 6;
@@ -163,9 +149,6 @@ void __init oss_register_interrupts(void)
  */
 
 void oss_irq_enable(int irq) {
-#ifdef DEBUG_IRQUSE
-       printk("oss_irq_enable(%d)\n", irq);
-#endif
        switch(irq) {
                case IRQ_MAC_SCC:
                        oss->irq_level[OSS_IOPSCC] = OSS_IRQLEV_IOPSCC;
@@ -199,9 +182,6 @@ void oss_irq_enable(int irq) {
  */
 
 void oss_irq_disable(int irq) {
-#ifdef DEBUG_IRQUSE
-       printk("oss_irq_disable(%d)\n", irq);
-#endif
        switch(irq) {
                case IRQ_MAC_SCC:
                        oss->irq_level[OSS_IOPSCC] = 0;
index cb2b1a3a2b62d97fb12aa14993749cab1de85544..439a2a2e58743d9686cc5e5290a6bebd5a979405 100644 (file)
@@ -122,11 +122,6 @@ static void psc_irq(struct irq_desc *desc)
        int irq_num;
        unsigned char irq_bit, events;
 
-#ifdef DEBUG_IRQS
-       printk("psc_irq: irq %u pIFR = 0x%02X pIER = 0x%02X\n",
-               irq, (int) psc_read_byte(pIFR), (int) psc_read_byte(pIER));
-#endif
-
        events = psc_read_byte(pIFR) & psc_read_byte(pIER) & 0xF;
        if (!events)
                return;
@@ -160,9 +155,6 @@ void psc_irq_enable(int irq) {
        int irq_idx     = IRQ_IDX(irq);
        int pIER        = pIERbase + (irq_src << 4);
 
-#ifdef DEBUG_IRQUSE
-       printk("psc_irq_enable(%d)\n", irq);
-#endif
        psc_write_byte(pIER, (1 << irq_idx) | 0x80);
 }
 
@@ -171,8 +163,5 @@ void psc_irq_disable(int irq) {
        int irq_idx     = IRQ_IDX(irq);
        int pIER        = pIERbase + (irq_src << 4);
 
-#ifdef DEBUG_IRQUSE
-       printk("psc_irq_disable(%d)\n", irq);
-#endif
        psc_write_byte(pIER, 1 << irq_idx);
 }
index 920ff63d4a81d77af96849d3e7c7f802da8be4a7..16629e91feba517f08d8b2ff09914871d247c681 100644 (file)
@@ -550,10 +550,6 @@ void via_irq_enable(int irq) {
        int irq_src     = IRQ_SRC(irq);
        int irq_idx     = IRQ_IDX(irq);
 
-#ifdef DEBUG_IRQUSE
-       printk(KERN_DEBUG "via_irq_enable(%d)\n", irq);
-#endif
-
        if (irq_src == 1) {
                via1[vIER] = IER_SET_BIT(irq_idx);
        } else if (irq_src == 2) {
@@ -582,10 +578,6 @@ void via_irq_disable(int irq) {
        int irq_src     = IRQ_SRC(irq);
        int irq_idx     = IRQ_IDX(irq);
 
-#ifdef DEBUG_IRQUSE
-       printk(KERN_DEBUG "via_irq_disable(%d)\n", irq);
-#endif
-
        if (irq_src == 1) {
                via1[vIER] = IER_CLR_BIT(irq_idx);
        } else if (irq_src == 2) {
index 9c1e656b1f8f581727daf981a1d747beb88d1651..a6ffead9bef5df15cfc0f473f0be2cc853f4cf80 100644 (file)
@@ -66,7 +66,7 @@ void __init m68k_setup_node(int node)
        end = (unsigned long)phys_to_virt(info->addr + info->size - 1) >> __virt_to_node_shift();
        for (; i <= end; i++) {
                if (pg_data_table[i])
-                       printk("overlap at %u for chunk %u\n", i, node);
+                       pr_warn("overlap at %u for chunk %u\n", i, node);
                pg_data_table[i] = pg_data_map + node;
        }
 #endif
index 51bc9d258ede39b8e9dd244b2a493974ed6ed552..4902b681a9fccf3f1cfa7db758e8054c67ad441e 100644 (file)
@@ -47,9 +47,7 @@ void __init init_pointer_table(unsigned long ptable)
        }
 
        PD_MARKBITS(dp) &= ~mask;
-#ifdef DEBUG
-       printk("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));
-#endif
+       pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));
 
        /* unreserve the page so it's possible to free that page */
        PD_PAGE(dp)->flags &= ~(1 << PG_reserved);
index 3dc41158c05e973af1a57d9eb43b48fa1db87677..ae03555449b85d0e606eba2113a5bfb6964051fa 100644 (file)
@@ -40,6 +40,7 @@ static inline void do_page_mapin(unsigned long phys, unsigned long virt,
        sun3_put_pte(virt, pte);
 
 #ifdef SUN3_KMAP_DEBUG
+       pr_info("mapin:");
        print_pte_vaddr(virt);
 #endif
 
@@ -80,8 +81,8 @@ void __iomem *sun3_ioremap(unsigned long phys, unsigned long size,
                return NULL;
 
 #ifdef SUN3_KMAP_DEBUG
-       printk("ioremap: got virt %p size %lx(%lx)\n",
-              area->addr, size, area->size);
+       pr_info("ioremap: got virt %p size %lx(%lx)\n", area->addr, size,
+               area->size);
 #endif
 
        pages = size / PAGE_SIZE;
index b5b7d53f7283833b364ffa7f4ec86d0727fb4c4e..177d776de1a09d5557984b9597b692c361c068c6 100644 (file)
@@ -44,9 +44,6 @@ void __init paging_init(void)
        unsigned long zones_size[MAX_NR_ZONES] = { 0, };
        unsigned long size;
 
-#ifdef TEST_VERIFY_AREA
-       wp_works_ok = 0;
-#endif
        empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
 
        address = PAGE_OFFSET;
index c11d38dfad08faa12881276e0f41b1b7473d0dec..8778612d1f312d5c2c7e588a8b8828adce68eca6 100644 (file)
@@ -63,7 +63,7 @@ int __init mvme147_parse_bootinfo(const struct bi_record *bi)
 
 void mvme147_reset(void)
 {
-       printk ("\r\n\nCalled mvme147_reset\r\n");
+       pr_info("\r\n\nCalled mvme147_reset\r\n");
        m147_pcc->watchdog = 0x0a;      /* Clear timer */
        m147_pcc->watchdog = 0xa5;      /* Enable watchdog - 100ms to reset */
        while (1)
index 58e240939d265f69abd5c8c578587178f0f0f27d..6fa06d4d16bf08d41a56c705dd5699485b0e3516 100644 (file)
@@ -72,8 +72,8 @@ int __init mvme16x_parse_bootinfo(const struct bi_record *bi)
 
 void mvme16x_reset(void)
 {
-       printk ("\r\n\nCalled mvme16x_reset\r\n"
-                       "\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r");
+       pr_info("\r\n\nCalled mvme16x_reset\r\n"
+               "\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r");
        /* The string of returns is to delay the reset until the whole
         * message is output.  Assert reset bit in GCSR */
        *(volatile char *)0xfff40107 = 0x80;
@@ -289,7 +289,7 @@ void __init config_mvme16x(void)
 
     if (strncmp("BDID", p->bdid, 4))
     {
-       printk ("\n\nBug call .BRD_ID returned garbage - giving up\n\n");
+       pr_crit("Bug call .BRD_ID returned garbage - giving up\n");
        while (1)
                ;
     }
@@ -298,25 +298,25 @@ void __init config_mvme16x(void)
        vme_brdtype = brdno;
 
     mvme16x_get_model(id);
-    printk ("\nBRD_ID: %s   BUG %x.%x %02x/%02x/%02x\n", id, p->rev>>4,
-                                       p->rev&0xf, p->yr, p->mth, p->day);
+    pr_info("BRD_ID: %s   BUG %x.%x %02x/%02x/%02x\n", id, p->rev >> 4,
+           p->rev & 0xf, p->yr, p->mth, p->day);
     if (brdno == 0x0162 || brdno == 0x172)
     {
        unsigned char rev = *(unsigned char *)MVME162_VERSION_REG;
 
        mvme16x_config = rev | MVME16x_CONFIG_GOT_SCCA;
 
-       printk ("MVME%x Hardware status:\n", brdno);
-       printk ("    CPU Type           68%s040\n",
-                       rev & MVME16x_CONFIG_GOT_FPU ? "" : "LC");
-       printk ("    CPU clock          %dMHz\n",
-                       rev & MVME16x_CONFIG_SPEED_32 ? 32 : 25);
-       printk ("    VMEchip2           %spresent\n",
-                       rev & MVME16x_CONFIG_NO_VMECHIP2 ? "NOT " : "");
-       printk ("    SCSI interface     %spresent\n",
-                       rev & MVME16x_CONFIG_NO_SCSICHIP ? "NOT " : "");
-       printk ("    Ethernet interface %spresent\n",
-                       rev & MVME16x_CONFIG_NO_ETHERNET ? "NOT " : "");
+       pr_info("MVME%x Hardware status:\n", brdno);
+       pr_info("    CPU Type           68%s040\n",
+               rev & MVME16x_CONFIG_GOT_FPU ? "" : "LC");
+       pr_info("    CPU clock          %dMHz\n",
+               rev & MVME16x_CONFIG_SPEED_32 ? 32 : 25);
+       pr_info("    VMEchip2           %spresent\n",
+               rev & MVME16x_CONFIG_NO_VMECHIP2 ? "NOT " : "");
+       pr_info("    SCSI interface     %spresent\n",
+               rev & MVME16x_CONFIG_NO_SCSICHIP ? "NOT " : "");
+       pr_info("    Ethernet interface %spresent\n",
+               rev & MVME16x_CONFIG_NO_ETHERNET ? "NOT " : "");
     }
     else
     {
index 8f00847a0e4be37940d6c3292b21e915b7c1de1d..7b24577a7bd0b24c107ee05d479375c1bd9594a7 100644 (file)
@@ -158,7 +158,7 @@ static int __init rtc_MK48T08_init(void)
        if (!MACH_IS_MVME16x)
                return -ENODEV;
 
-       printk(KERN_INFO "MK48T08 Real Time Clock Driver v%s\n", RTC_VERSION);
+       pr_info("MK48T08 Real Time Clock Driver v%s\n", RTC_VERSION);
        return misc_register(&rtc_dev);
 }
 device_initcall(rtc_MK48T08_init);
index ea89a24f46000e4fb07d2192231eeab171a9c8b2..71c0867ecf20f201a99950ad584378d2607a0383 100644 (file)
@@ -84,7 +84,7 @@ static int __init q40_debug_setup(char *arg)
 {
        /* useful for early debugging stages - writes kernel messages into SRAM */
        if (MACH_IS_Q40 && !strncmp(arg, "mem", 3)) {
-               /*printk("using NVRAM debug, q40_mem_cptr=%p\n",q40_mem_cptr);*/
+               /*pr_info("using NVRAM debug, q40_mem_cptr=%p\n",q40_mem_cptr);*/
                _cpleft = 2000 - ((long)q40_mem_cptr-0xff020000) / 4;
                register_console(&q40_console_driver);
        }
@@ -124,8 +124,8 @@ static void q40_heartbeat(int on)
 
 static void q40_reset(void)
 {
-        halted = 1;
-        printk("\n\n*******************************************\n"
+       halted = 1;
+       pr_info("*******************************************\n"
                "Called q40_reset : press the RESET button!!\n"
                "*******************************************\n");
        Q40_LED_ON();
@@ -135,10 +135,10 @@ static void q40_reset(void)
 
 static void q40_halt(void)
 {
-        halted = 1;
-        printk("\n\n*******************\n"
-                  "  Called q40_halt\n"
-                  "*******************\n");
+       halted = 1;
+       pr_info("*******************\n"
+               "  Called q40_halt\n"
+               "*******************\n");
        Q40_LED_ON();
        while (1)
                ;
index 513f9bb17b9cffe6a667b3becf25471993d0b6e5..3e7603202977e715a6afc07e4ca0f3d713e0c3ed 100644 (file)
@@ -48,7 +48,8 @@ static unsigned int q40_irq_startup(struct irq_data *data)
        switch (irq) {
        case 1: case 2: case 8: case 9:
        case 11: case 12: case 13:
-               printk("%s: ISA IRQ %d not implemented by HW\n", __func__, irq);
+               pr_warn("%s: ISA IRQ %d not implemented by HW\n", __func__,
+                       irq);
                /* FIXME return -ENXIO; */
        }
        return 0;
@@ -250,7 +251,7 @@ static void q40_irq_handler(unsigned int irq, struct pt_regs *fp)
                                        disable_irq(irq);
                                        disabled = 1;
 #else
-                                       /*printk("IRQ_INPROGRESS detected for irq %d, disabling - %s disabled\n",
+                                       /*pr_warn("IRQ_INPROGRESS detected for irq %d, disabling - %s disabled\n",
                                                irq, disabled ? "already" : "not yet"); */
                                        fp->sr = (((fp->sr) & (~0x700))+0x200);
                                        disabled = 1;
@@ -273,7 +274,7 @@ static void q40_irq_handler(unsigned int irq, struct pt_regs *fp)
                                        }
 #else
                                        disabled = 0;
-                                       /*printk("reenabling irq %d\n", irq); */
+                                       /*pr_info("reenabling irq %d\n", irq); */
 #endif
                                }
 // used to do 'goto repeat;' here, this delayed bh processing too long
@@ -281,7 +282,8 @@ static void q40_irq_handler(unsigned int irq, struct pt_regs *fp)
                        }
                }
                if (mer && ccleirq > 0 && !aliased_irq) {
-                       printk("ISA interrupt from unknown source? EIRQ_REG = %x\n",mer);
+                       pr_warn("ISA interrupt from unknown source? EIRQ_REG = %x\n",
+                               mer);
                        ccleirq--;
                }
        }
@@ -301,7 +303,7 @@ void q40_irq_enable(struct irq_data *data)
        if (irq >= 5 && irq <= 15) {
                mext_disabled--;
                if (mext_disabled > 0)
-                       printk("q40_irq_enable : nested disable/enable\n");
+                       pr_warn("q40_irq_enable : nested disable/enable\n");
                if (mext_disabled == 0)
                        master_outb(1, EXT_ENABLE_REG);
        }
@@ -321,6 +323,7 @@ void q40_irq_disable(struct irq_data *data)
                master_outb(0, EXT_ENABLE_REG);
                mext_disabled++;
                if (mext_disabled > 1)
-                       printk("disable_irq nesting count %d\n",mext_disabled);
+                       pr_info("disable_irq nesting count %d\n",
+                               mext_disabled);
        }
 }
index 3af34fa3a344ba8509b6184acab3dc93a60b4cd4..1d28d380e8cc10379b5f155adb1a91194145150a 100644 (file)
@@ -134,7 +134,7 @@ void __init config_sun3(void)
 {
        unsigned long memory_start, memory_end;
 
-       printk("ARCH: SUN3\n");
+       pr_info("ARCH: SUN3\n");
        idprom_init();
 
        /* Subtract kernel memory from available memory */
index d95506e06c2ac42b67f9ce634413da8efc50082f..ca02ee25894cc4a5df94fa88b9c499c4be079f54 100644 (file)
@@ -31,8 +31,7 @@ static unsigned long dvma_page(unsigned long kaddr, unsigned long vaddr)
 
        ptep = pfn_pte(virt_to_pfn(kaddr), PAGE_KERNEL);
        pte = pte_val(ptep);
-//             printk("dvma_remap: addr %lx -> %lx pte %08lx len %x\n",
-//                    kaddr, vaddr, pte, len);
+//     pr_info("dvma_remap: addr %lx -> %lx pte %08lx\n", kaddr, vaddr, pte);
        if(ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] != pte) {
                sun3_put_pte(vaddr, pte);
                ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] = pte;
index cfe9aa4223431764f04f8f6fd29f4f00c3cb336c..9c23f506d60df450c809a4f774f8924278fb7961 100644 (file)
@@ -64,12 +64,14 @@ static void __init display_system_type(unsigned char machtype)
        for (i = 0; i < NUM_SUN_MACHINES; i++) {
                if(Sun_Machines[i].id_machtype == machtype) {
                        if (machtype != (SM_SUN4M_OBP | 0x00))
-                               printk("TYPE: %s\n", Sun_Machines[i].name);
+                               pr_info("TYPE: %s\n", Sun_Machines[i].name);
                        else {
 #if 0
+                               char sysname[128];
+
                                prom_getproperty(prom_root_node, "banner-name",
                                                 sysname, sizeof(sysname));
-                               printk("TYPE: %s\n", sysname);
+                               pr_info("TYPE: %s\n", sysname);
 #endif
                        }
                        return;
@@ -125,5 +127,5 @@ void __init idprom_init(void)
 
        display_system_type(idprom->id_machtype);
 
-       printk("Ethernet address: %pM\n", idprom->id_ethaddr);
+       pr_info("Ethernet address: %pM\n", idprom->id_ethaddr);
 }
index 0f95134e9b858f2af09256214b34da0c277a40f5..e9d7fbe4d5ae4a3de3f201c75b83b026c059ef96 100644 (file)
@@ -72,21 +72,21 @@ void print_pte (pte_t pte)
 #if 0
        /* Verbose version. */
        unsigned long val = pte_val (pte);
-       printk (" pte=%lx [addr=%lx",
+       pr_cont(" pte=%lx [addr=%lx",
                val, (val & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT);
-       if (val & SUN3_PAGE_VALID)      printk (" valid");
-       if (val & SUN3_PAGE_WRITEABLE)  printk (" write");
-       if (val & SUN3_PAGE_SYSTEM)     printk (" sys");
-       if (val & SUN3_PAGE_NOCACHE)    printk (" nocache");
-       if (val & SUN3_PAGE_ACCESSED)   printk (" accessed");
-       if (val & SUN3_PAGE_MODIFIED)   printk (" modified");
+       if (val & SUN3_PAGE_VALID)      pr_cont(" valid");
+       if (val & SUN3_PAGE_WRITEABLE)  pr_cont(" write");
+       if (val & SUN3_PAGE_SYSTEM)     pr_cont(" sys");
+       if (val & SUN3_PAGE_NOCACHE)    pr_cont(" nocache");
+       if (val & SUN3_PAGE_ACCESSED)   pr_cont(" accessed");
+       if (val & SUN3_PAGE_MODIFIED)   pr_cont(" modified");
        switch (val & SUN3_PAGE_TYPE_MASK) {
-               case SUN3_PAGE_TYPE_MEMORY: printk (" memory"); break;
-               case SUN3_PAGE_TYPE_IO:     printk (" io");     break;
-               case SUN3_PAGE_TYPE_VME16:  printk (" vme16");  break;
-               case SUN3_PAGE_TYPE_VME32:  printk (" vme32");  break;
+               case SUN3_PAGE_TYPE_MEMORY: pr_cont(" memory"); break;
+               case SUN3_PAGE_TYPE_IO:     pr_cont(" io");     break;
+               case SUN3_PAGE_TYPE_VME16:  pr_cont(" vme16");  break;
+               case SUN3_PAGE_TYPE_VME32:  pr_cont(" vme32");  break;
        }
-       printk ("]\n");
+       pr_cont("]\n");
 #else
        /* Terse version. More likely to fit on a line. */
        unsigned long val = pte_val (pte);
@@ -108,7 +108,7 @@ void print_pte (pte_t pte)
                default: type = "unknown?"; break;
        }
 
-       printk (" pte=%08lx [%07lx %s %s]\n",
+       pr_cont(" pte=%08lx [%07lx %s %s]\n",
                val, (val & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT, flags, type);
 #endif
 }
@@ -116,7 +116,7 @@ void print_pte (pte_t pte)
 /* Print the PTE value for a given virtual address. For debugging. */
 void print_pte_vaddr (unsigned long vaddr)
 {
-       printk (" vaddr=%lx [%02lx]", vaddr, sun3_get_segmap (vaddr));
+       pr_cont(" vaddr=%lx [%02lx]", vaddr, sun3_get_segmap (vaddr));
        print_pte (__pte (sun3_get_pte (vaddr)));
 }
 
@@ -153,7 +153,7 @@ void __init mmu_emu_init(unsigned long bootmem_end)
 
                if(!pmeg_alloc[i]) {
 #ifdef DEBUG_MMU_EMU
-                       printk("freed: ");
+                       pr_info("freed:");
                        print_pte_vaddr (seg);
 #endif
                        sun3_put_segmap(seg, SUN3_INVALID_PMEG);
@@ -165,7 +165,7 @@ void __init mmu_emu_init(unsigned long bootmem_end)
                if (sun3_get_segmap (seg) != SUN3_INVALID_PMEG) {
 #ifdef DEBUG_PROM_MAPS
                        for(i = 0; i < 16; i++) {
-                               printk ("mapped:");
+                               pr_info("mapped:");
                                print_pte_vaddr (seg + (i*PAGE_SIZE));
                                break;
                        }
@@ -293,8 +293,8 @@ inline void mmu_emu_map_pmeg (int context, int vaddr)
 
 
 #ifdef DEBUG_MMU_EMU
-printk("mmu_emu_map_pmeg: pmeg %x to context %d vaddr %x\n",
-       curr_pmeg, context, vaddr);
+       pr_info("mmu_emu_map_pmeg: pmeg %x to context %d vaddr %x\n",
+               curr_pmeg, context, vaddr);
 #endif
 
        /* Invalidate old mapping for the pmeg, if any */
@@ -370,7 +370,7 @@ int mmu_emu_handle_fault (unsigned long vaddr, int read_flag, int kernel_fault)
        }
 
 #ifdef DEBUG_MMU_EMU
-       printk ("mmu_emu_handle_fault: vaddr=%lx type=%s crp=%p\n",
+       pr_info("mmu_emu_handle_fault: vaddr=%lx type=%s crp=%p\n",
                vaddr, read_flag ? "read" : "write", crp);
 #endif
 
@@ -378,14 +378,15 @@ int mmu_emu_handle_fault (unsigned long vaddr, int read_flag, int kernel_fault)
        offset  = (vaddr >> SUN3_PTE_SIZE_BITS) & 0xF;
 
 #ifdef DEBUG_MMU_EMU
-       printk ("mmu_emu_handle_fault: segment=%lx offset=%lx\n", segment, offset);
+       pr_info("mmu_emu_handle_fault: segment=%lx offset=%lx\n", segment,
+               offset);
 #endif
 
        pte = (pte_t *) pgd_val (*(crp + segment));
 
 //todo: next line should check for valid pmd properly.
        if (!pte) {
-//                printk ("mmu_emu_handle_fault: invalid pmd\n");
+//                pr_info("mmu_emu_handle_fault: invalid pmd\n");
                 return 0;
         }
 
@@ -417,9 +418,9 @@ int mmu_emu_handle_fault (unsigned long vaddr, int read_flag, int kernel_fault)
                pte_val (*pte) |= SUN3_PAGE_ACCESSED;
 
 #ifdef DEBUG_MMU_EMU
-       printk ("seg:%d crp:%p ->", get_fs().seg, crp);
+       pr_info("seg:%ld crp:%p ->", get_fs().seg, crp);
        print_pte_vaddr (vaddr);
-       printk ("\n");
+       pr_cont("\n");
 #endif
 
        return 1;
index df85018f487ac1a5ab94774a5d7a00ecf5f10243..5b82bea03493e8e3741c2cb877e4e949884d409e 100644 (file)
@@ -39,7 +39,7 @@ prom_printf(char *fmt, ...)
 
 #ifdef CONFIG_KGDB
        if (kgdb_initialized) {
-               printk("kgdb_initialized = %d\n", kgdb_initialized);
+               pr_info("kgdb_initialized = %d\n", kgdb_initialized);
                putpacket(bptr, 1);
        } else
 #else
index b37521a5259ddb7a040d6606fdd24f324f657e8f..d36bd15f9fdcda46d0c0459ad4a694b32fdefbf2 100644 (file)
@@ -62,7 +62,7 @@ static void print_use(void)
        int i;
        int j = 0;
 
-       printk("dvma entry usage:\n");
+       pr_info("dvma entry usage:\n");
 
        for(i = 0; i < IOMMU_TOTAL_ENTRIES; i++) {
                if(!iommu_use[i])
@@ -70,16 +70,15 @@ static void print_use(void)
 
                j++;
 
-               printk("dvma entry: %08lx len %08lx\n",
-                      ( i << DVMA_PAGE_SHIFT) + DVMA_START,
-                      iommu_use[i]);
+               pr_info("dvma entry: %08x len %08lx\n",
+                       (i << DVMA_PAGE_SHIFT) + DVMA_START, iommu_use[i]);
        }
 
-       printk("%d entries in use total\n", j);
+       pr_info("%d entries in use total\n", j);
 
-       printk("allocation/free calls: %lu/%lu\n", dvma_allocs, dvma_frees);
-       printk("allocation/free bytes: %Lx/%Lx\n", dvma_alloc_bytes,
-              dvma_free_bytes);
+       pr_info("allocation/free calls: %lu/%lu\n", dvma_allocs, dvma_frees);
+       pr_info("allocation/free bytes: %Lx/%Lx\n", dvma_alloc_bytes,
+               dvma_free_bytes);
 }
 
 static void print_holes(struct list_head *holes)
@@ -88,18 +87,18 @@ static void print_holes(struct list_head *holes)
        struct list_head *cur;
        struct hole *hole;
 
-       printk("listing dvma holes\n");
+       pr_info("listing dvma holes\n");
        list_for_each(cur, holes) {
                hole = list_entry(cur, struct hole, list);
 
                if((hole->start == 0) && (hole->end == 0) && (hole->size == 0))
                        continue;
 
-               printk("hole: start %08lx end %08lx size %08lx\n", hole->start, hole->end, hole->size);
+               pr_info("hole: start %08lx end %08lx size %08lx\n",
+                       hole->start, hole->end, hole->size);
        }
 
-       printk("end of hole listing...\n");
-
+       pr_info("end of hole listing...\n");
 }
 #endif /* DVMA_DEBUG */
 
@@ -137,7 +136,7 @@ static inline struct hole *rmcache(void)
 
        if(list_empty(&hole_cache)) {
                if(!refill()) {
-                       printk("out of dvma hole cache!\n");
+                       pr_crit("out of dvma hole cache!\n");
                        BUG();
                }
        }
@@ -157,7 +156,7 @@ static inline unsigned long get_baddr(int len, unsigned long align)
 
        if(list_empty(&hole_list)) {
 #ifdef DVMA_DEBUG
-               printk("out of dvma holes! (printing hole cache)\n");
+               pr_crit("out of dvma holes! (printing hole cache)\n");
                print_holes(&hole_cache);
                print_use();
 #endif
@@ -195,7 +194,7 @@ static inline unsigned long get_baddr(int len, unsigned long align)
 
        }
 
-       printk("unable to find dvma hole!\n");
+       pr_crit("unable to find dvma hole!\n");
        BUG();
        return 0;
 }
@@ -287,15 +286,12 @@ unsigned long dvma_map_align(unsigned long kaddr, int len, int align)
                len = 0x800;
 
        if(!kaddr || !len) {
-//             printk("error: kaddr %lx len %x\n", kaddr, len);
+//             pr_err("error: kaddr %lx len %x\n", kaddr, len);
 //             *(int *)4 = 0;
                return 0;
        }
 
-#ifdef DEBUG
-       printk("dvma_map request %08lx bytes from %08lx\n",
-              len, kaddr);
-#endif
+       pr_debug("dvma_map request %08x bytes from %08lx\n", len, kaddr);
        off = kaddr & ~DVMA_PAGE_MASK;
        kaddr &= PAGE_MASK;
        len += off;
@@ -307,12 +303,13 @@ unsigned long dvma_map_align(unsigned long kaddr, int len, int align)
                align = ((align + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);
 
        baddr = get_baddr(len, align);
-//     printk("using baddr %lx\n", baddr);
+//     pr_info("using baddr %lx\n", baddr);
 
        if(!dvma_map_iommu(kaddr, baddr, len))
                return (baddr + off);
 
-       printk("dvma_map failed kaddr %lx baddr %lx len %x\n", kaddr, baddr, len);
+       pr_crit("dvma_map failed kaddr %lx baddr %lx len %x\n", kaddr, baddr,
+       len);
        BUG();
        return 0;
 }
@@ -343,9 +340,7 @@ void *dvma_malloc_align(unsigned long len, unsigned long align)
        if(!len)
                return NULL;
 
-#ifdef DEBUG
-       printk("dvma_malloc request %lx bytes\n", len);
-#endif
+       pr_debug("dvma_malloc request %lx bytes\n", len);
        len = ((len + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);
 
         if((kaddr = __get_free_pages(GFP_ATOMIC, get_order(len))) == 0)
@@ -364,10 +359,8 @@ void *dvma_malloc_align(unsigned long len, unsigned long align)
                return NULL;
        }
 
-#ifdef DEBUG
-       printk("mapped %08lx bytes %08lx kern -> %08lx bus\n",
-              len, kaddr, baddr);
-#endif
+       pr_debug("mapped %08lx bytes %08lx kern -> %08lx bus\n", len, kaddr,
+                baddr);
 
        return (void *)vaddr;
 
index d5ddcdaa2347356152047deb6d272a5594e0b97a..9413c8724b0d3372bbf063118404ab5866c434fa 100644 (file)
@@ -58,21 +58,17 @@ static volatile unsigned long *iommu_pte = (unsigned long *)SUN3X_IOMMU;
                                         ((addr & 0x03c00000) >>     \
                                                (DVMA_PAGE_SHIFT+4)))
 
-#undef DEBUG
-
 #ifdef DEBUG
 /* code to print out a dvma mapping for debugging purposes */
 void dvma_print (unsigned long dvma_addr)
 {
 
-        unsigned long index;
-
-        index = dvma_addr >> DVMA_PAGE_SHIFT;
-
-        printk("idx %lx dvma_addr %08lx paddr %08lx\n", index, dvma_addr,
-               dvma_entry_paddr(index));
+       unsigned long index;
 
+       index = dvma_addr >> DVMA_PAGE_SHIFT;
 
+       pr_info("idx %lx dvma_addr %08lx paddr %08lx\n", index, dvma_addr,
+               dvma_entry_paddr(index));
 }
 #endif
 
@@ -91,10 +87,7 @@ inline int dvma_map_cpu(unsigned long kaddr,
 
        end = PAGE_ALIGN(vaddr + len);
 
-#ifdef DEBUG
-       printk("dvma: mapping kern %08lx to virt %08lx\n",
-              kaddr, vaddr);
-#endif
+       pr_debug("dvma: mapping kern %08lx to virt %08lx\n", kaddr, vaddr);
        pgd = pgd_offset_k(vaddr);
 
        do {
@@ -126,10 +119,8 @@ inline int dvma_map_cpu(unsigned long kaddr,
                                end3 = end2;
 
                        do {
-#ifdef DEBUG
-                               printk("mapping %08lx phys to %08lx\n",
-                                      __pa(kaddr), vaddr);
-#endif
+                               pr_debug("mapping %08lx phys to %08lx\n",
+                                        __pa(kaddr), vaddr);
                                set_pte(pte, pfn_pte(virt_to_pfn(kaddr),
                                                     PAGE_KERNEL));
                                pte++;
@@ -162,7 +153,8 @@ inline int dvma_map_iommu(unsigned long kaddr, unsigned long baddr,
        for(; index < end ; index++) {
 //             if(dvma_entry_use(index))
 //                     BUG();
-//             printk("mapping pa %lx to ba %lx\n", __pa(kaddr), index << DVMA_PAGE_SHIFT);
+//             pr_info("mapping pa %lx to ba %lx\n", __pa(kaddr),
+//                     index << DVMA_PAGE_SHIFT);
 
                dvma_entry_set(index, __pa(kaddr));
 
@@ -190,13 +182,12 @@ void dvma_unmap_iommu(unsigned long baddr, int len)
        end = (DVMA_PAGE_ALIGN(baddr+len) >> DVMA_PAGE_SHIFT);
 
        for(; index < end ; index++) {
-#ifdef DEBUG
-               printk("freeing bus mapping %08x\n", index << DVMA_PAGE_SHIFT);
-#endif
+               pr_debug("freeing bus mapping %08x\n",
+                        index << DVMA_PAGE_SHIFT);
 #if 0
                if(!dvma_entry_use(index))
-                       printk("dvma_unmap freeing unused entry %04x\n",
-                              index);
+                       pr_info("dvma_unmap freeing unused entry %04x\n",
+                               index);
                else
                        dvma_entry_dec(index);
 #endif
index 0898c3f8150851a34e8c4f54fe8c3d7760497cd4..5d60e65c1ee5274edb9b3a2898df912a246a0f5b 100644 (file)
@@ -106,9 +106,9 @@ void __init sun3x_prom_init(void)
        idprom_init();
 
        if (!((idprom->id_machtype & SM_ARCH_MASK) == SM_SUN3X)) {
-               printk("Warning: machine reports strange type %02x\n",
+               pr_warn("Machine reports strange type %02x\n",
                        idprom->id_machtype);
-               printk("Pretending it's a 3/80, but very afraid...\n");
+               pr_warn("Pretending it's a 3/80, but very afraid...\n");
                idprom->id_machtype = SM_SUN3X | SM_3_80;
        }
 
index 167150c701d18f1bd5d1ef0abafb8d4750580093..d3731f0db73b77af36207b07ff4b74a34341c1bf 100644 (file)
@@ -2,7 +2,6 @@ generic-y += auxvec.h
 generic-y += bitsperlong.h
 generic-y += bugs.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += current.h
 generic-y += device.h
 generic-y += dma.h
index b0ae88c9fed922a4ba95be0498f421d3927eb40e..6275eb051801236cbdd235d5be84d6d1574cd838 100644 (file)
@@ -1,7 +1,6 @@
 
 generic-y += barrier.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += device.h
 generic-y += exec.h
 generic-y += irq_work.h
index b3c5bde43d34f85afc50c8eb955745b88836386c..e137eedb90d254768b80198c63cce89bcdb5d45c 100644 (file)
@@ -1703,6 +1703,8 @@ config CPU_BMIPS
        select WEAK_ORDERING
        select CPU_SUPPORTS_HIGHMEM
        select CPU_HAS_PREFETCH
+       select CPU_SUPPORTS_CPUFREQ
+       select MIPS_EXTERNAL_TIMER
        help
          Support for BMIPS32/3300/4350/4380 and BMIPS5000 processors.
 
index 4eb5d6e9cf8f1f256cbcf91b653746cb53ee0dde..3cefa6bc01ddf880320209187f574340c5e3996e 100644 (file)
@@ -9,13 +9,20 @@ CONFIG_MIPS_O32_FP64_SUPPORT=y
 # CONFIG_SWAP is not set
 CONFIG_NO_HZ=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_RD_GZIP=y
 CONFIG_EXPERT=y
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_BMIPS_CPUFREQ=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=y
@@ -24,7 +31,6 @@ CONFIG_INET=y
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_INET_DIAG is not set
 CONFIG_CFG80211=y
 CONFIG_NL80211_TESTMODE=y
@@ -34,8 +40,6 @@ CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
-CONFIG_PRINTK_TIME=y
-CONFIG_BRCMSTB_GISB_ARB=y
 CONFIG_MTD=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_INTELEXT=y
@@ -51,16 +55,15 @@ CONFIG_USB_USBNET=y
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 # CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_HW_RANDOM is not set
-CONFIG_POWER_SUPPLY=y
 CONFIG_POWER_RESET=y
 CONFIG_POWER_RESET_BRCMSTB=y
 CONFIG_POWER_RESET_SYSCON=y
+CONFIG_POWER_SUPPLY=y
 # CONFIG_HWMON is not set
 CONFIG_USB=y
 CONFIG_USB_EHCI_HCD=y
@@ -82,6 +85,7 @@ CONFIG_CIFS=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ASCII=y
 CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_FS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_CMDLINE_BOOL=y
index 5da76e0e120f59fc788d98c11f0a949f9fbc30db..bed745596d8692c7200492a66a42e5a657ce06ec 100644 (file)
@@ -40,7 +40,6 @@ CONFIG_PM_STD_PARTITION="/dev/hda3"
 CONFIG_CPU_FREQ=y
 CONFIG_CPU_FREQ_DEBUG=y
 CONFIG_CPU_FREQ_STAT=m
-CONFIG_CPU_FREQ_STAT_DETAILS=y
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=m
 CONFIG_CPU_FREQ_GOV_USERSPACE=m
index 3269b742a75e1e61328fa4aa230759cc1350ab1f..994b1c4392bedcc067b3a6b7f52a48ceab5c738f 100644 (file)
@@ -1,7 +1,6 @@
 # MIPS headers
 generic-(CONFIG_GENERIC_CSUM) += checksum.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += current.h
 generic-y += dma-contiguous.h
 generic-y += emergency-restart.h
index 9c7f3e136d50da8874df0af6749aa2dfe358d61c..4a2ff3953b99ce44b1b78958ed6a8ad5743ba129 100644 (file)
@@ -99,15 +99,7 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
 #undef TASK_SIZE
 #define TASK_SIZE TASK_SIZE32
 
-#undef cputime_to_timeval
-#define cputime_to_timeval cputime_to_compat_timeval
-static __inline__ void
-cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
-{
-       unsigned long jiffies = cputime_to_jiffies(cputime);
-
-       value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
-       value->tv_sec = jiffies / HZ;
-}
+#undef ns_to_timeval
+#define ns_to_timeval ns_to_compat_timeval
 
 #include "../../../fs/binfmt_elf.c"
index 1ab34322dd977cdc898475573f26dfb19b326c59..3916404e7fd102eb19b42a1d142fdd2c0aaf3354 100644 (file)
@@ -102,15 +102,7 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
 #undef TASK_SIZE
 #define TASK_SIZE TASK_SIZE32
 
-#undef cputime_to_timeval
-#define cputime_to_timeval cputime_to_compat_timeval
-static __inline__ void
-cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
-{
-       unsigned long jiffies = cputime_to_jiffies(cputime);
-
-       value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
-       value->tv_sec = jiffies / HZ;
-}
+#undef ns_to_timeval
+#define ns_to_timeval ns_to_compat_timeval
 
 #include "../../../fs/binfmt_elf.c"
index 1c8dd0f5cd5d1567126f42b67112b5b0f91962a8..97f64c723a0c811a922437900327a728c2e462c4 100644 (file)
@@ -1,7 +1,6 @@
 
 generic-y += barrier.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += exec.h
 generic-y += irq_work.h
 generic-y += mcs_spinlock.h
index 393d311735c8b573bd5702eac1dcaaac1103600e..67e333aa7629c406745564cb24acc5903733ec41 100644 (file)
@@ -16,7 +16,7 @@
 struct task_struct;
 struct thread_struct;
 
-#if !defined(CONFIG_LAZY_SAVE_FPU)
+#if defined(CONFIG_FPU) && !defined(CONFIG_LAZY_SAVE_FPU)
 struct fpu_state_struct;
 extern asmlinkage void fpu_save(struct fpu_state_struct *);
 #define switch_fpu(prev, next)                                         \
index d63330e88379dcc591e29e645b5363f063122f9d..35b0e883761a846e7cd63d8175e0aa304de73f9c 100644 (file)
@@ -6,7 +6,6 @@ generic-y += bitsperlong.h
 generic-y += bug.h
 generic-y += bugs.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += current.h
 generic-y += device.h
 generic-y += div64.h
index 2832f031fb11d0e86fe695ced404c41cf36ebf90..ef8d1ccc3e450eaaaaaa8b199e7bf8b7a5b5436d 100644 (file)
@@ -12,7 +12,6 @@ generic-y += checksum.h
 generic-y += clkdev.h
 generic-y += cmpxchg-local.h
 generic-y += cmpxchg.h
-generic-y += cputime.h
 generic-y += current.h
 generic-y += device.h
 generic-y += div64.h
index 91f53c07f410b7cc579c3407edbf907371e329f6..4e179d770d69456c8864fac3e704adb6f719cf1b 100644 (file)
@@ -2,7 +2,6 @@
 generic-y += auxvec.h
 generic-y += barrier.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += device.h
 generic-y += div64.h
 generic-y += emergency-restart.h
index 3f9406d9b9d675c833cdfd6f661b24e359bd3453..da87943328a510d468d089fdf648edfa5933445b 100644 (file)
@@ -6,7 +6,7 @@
 #endif
 
 #include <linux/compiler.h>
-#include <asm/types.h>         /* for BITS_PER_LONG/SHIFT_PER_LONG */
+#include <asm/types.h>
 #include <asm/byteorder.h>
 #include <asm/barrier.h>
 #include <linux/atomic.h>
  * to include/asm-i386/bitops.h or kerneldoc
  */
 
+#if __BITS_PER_LONG == 64
+#define SHIFT_PER_LONG 6
+#else
+#define SHIFT_PER_LONG 5
+#endif
+
 #define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
 
 
index e0a23c7bdd432adc59f4f4ebefcc4d51f91b05f1..07fa7e50bdc069dd989cd52dd283517250a3b1e4 100644 (file)
@@ -3,10 +3,8 @@
 
 #if defined(__LP64__)
 #define __BITS_PER_LONG 64
-#define SHIFT_PER_LONG 6
 #else
 #define __BITS_PER_LONG 32
-#define SHIFT_PER_LONG 5
 #endif
 
 #include <asm-generic/bitsperlong.h>
index e78403b129ef927361032c8c92c5e3c82e830941..928e1bbac98fef4fa098163d12a98670000065d2 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef _PARISC_SWAB_H
 #define _PARISC_SWAB_H
 
+#include <asm/bitsperlong.h>
 #include <linux/types.h>
 #include <linux/compiler.h>
 
@@ -38,7 +39,7 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
 }
 #define __arch_swab32 __arch_swab32
 
-#if BITS_PER_LONG > 32
+#if __BITS_PER_LONG > 32
 /*
 ** From "PA-RISC 2.0 Architecture", HP Professional Books.
 ** See Appendix I page 8 , "Endian Byte Swapping".
@@ -61,6 +62,6 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
        return x;
 }
 #define __arch_swab64 __arch_swab64
-#endif /* BITS_PER_LONG > 32 */
+#endif /* __BITS_PER_LONG > 32 */
 
 #endif /* _PARISC_SWAB_H */
index 00dc66f9c2ba0a41fd6774fc709d9b62152e4e5f..f2adcf33f8f2218761b19e9414cbbf8e0c4085b8 100644 (file)
@@ -91,14 +91,7 @@ struct elf_prpsinfo32
        current->thread.map_base = DEFAULT_MAP_BASE32; \
        current->thread.task_size = DEFAULT_TASK_SIZE32 \
 
-#undef cputime_to_timeval
-#define cputime_to_timeval cputime_to_compat_timeval
-static __inline__ void
-cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
-{
-       unsigned long jiffies = cputime_to_jiffies(cputime);
-       value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
-       value->tv_sec = jiffies / HZ;
-}
+#undef ns_to_timeval
+#define ns_to_timeval ns_to_compat_timeval
 
 #include "../../../fs/binfmt_elf.c"
index 2e66a887788e8781bf76b13cbd3fed02e6979259..068ed3607bac0cdad18a381b8c2052c7c985b97b 100644 (file)
@@ -36,6 +36,7 @@
 #undef PCI_DEBUG
 #include <linux/proc_fs.h>
 #include <linux/export.h>
+#include <linux/sched.h>
 
 #include <asm/processor.h>
 #include <asm/sections.h>
@@ -176,6 +177,7 @@ void __init setup_arch(char **cmdline_p)
        conswitchp = &dummy_con;        /* we use do_take_over_console() later ! */
 #endif
 
+       clear_sched_clock_stable();
 }
 
 /*
index a8ee573fe610bd5e2d8191b4dffb05e134a6d3c2..281f4f1fcd1f68ab2fbc613afa3f2597bd090550 100644 (file)
@@ -164,7 +164,6 @@ config PPC
        select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
        select HAVE_ARCH_HARDENED_USERCOPY
        select HAVE_KERNEL_GZIP
-       select HAVE_CC_STACKPROTECTOR
 
 config GENERIC_CSUM
        def_bool CPU_LITTLE_ENDIAN
@@ -484,6 +483,7 @@ config RELOCATABLE
        bool "Build a relocatable kernel"
        depends on (PPC64 && !COMPILE_TEST) || (FLATMEM && (44x || FSL_BOOKE))
        select NONSTATIC_KERNEL
+       select MODULE_REL_CRCS if MODVERSIONS
        help
          This builds a kernel image that is capable of running at the
          location the kernel is loaded at. For ppc32, there is no any
index c744569a20e10d794bd47d1641d98fc5a83fd85a..a97296c64eb2227027b4d4ee6d3c4f2e033d075e 100644 (file)
                compatible = "fsl,t2080-l2-cache-controller";
                reg = <0xc20000 0x40000>;
                next-level-cache = <&cpc>;
+               interrupts = <16 2 1 9>;
        };
 };
index 3ce91a3df27f175286d0c70b71e9254fbcc1196a..1d2d69dd6409036565b4d1e9bfbb0a81f37da7e9 100644 (file)
@@ -62,7 +62,6 @@ CONFIG_MPC8610_HPCD=y
 CONFIG_GEF_SBC610=y
 CONFIG_CPU_FREQ=y
 CONFIG_CPU_FREQ_STAT=m
-CONFIG_CPU_FREQ_STAT_DETAILS=y
 CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=m
index c133246df4679e3780b381609d30ce2381fcaf14..3abcf98ed2e02bd37aa8a9e55ef8b26c1ea85e10 100644 (file)
 
 /* Stuff for accurate time accounting */
 struct cpu_accounting_data {
-       unsigned long user_time;        /* accumulated usermode TB ticks */
-       unsigned long system_time;      /* accumulated system TB ticks */
-       unsigned long user_time_scaled; /* accumulated usermode SPURR ticks */
+       /* Accumulated cputime values to flush on ticks*/
+       unsigned long utime;
+       unsigned long stime;
+       unsigned long utime_scaled;
+       unsigned long stime_scaled;
+       unsigned long gtime;
+       unsigned long hardirq_time;
+       unsigned long softirq_time;
+       unsigned long steal_time;
+       unsigned long idle_time;
+       /* Internal counters */
        unsigned long starttime;        /* TB value snapshot */
        unsigned long starttime_user;   /* TB value on exit to usermode */
        unsigned long startspurr;       /* SPURR value snapshot */
index b312b152461b0539a22c5939ba7728a38f5d8d32..6e834caa37206a476792823463e81ac4c9617f9c 100644 (file)
@@ -23,7 +23,9 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
 {
        int i;
 
+#ifndef __clang__ /* clang can't cope with this */
        BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif
 
 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
        if (!static_key_initialized) {
index aa2e6a34b872755ad58c9a12ed145f9d4f9a62bb..99b541865d8dd65cb420e25eae476b2ec6d71632 100644 (file)
 #ifndef __POWERPC_CPUTIME_H
 #define __POWERPC_CPUTIME_H
 
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-#include <asm-generic/cputime.h>
-#ifdef __KERNEL__
-static inline void setup_cputime_one_jiffy(void) { }
-#endif
-#else
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 
 #include <linux/types.h>
 #include <linux/time.h>
@@ -36,65 +31,6 @@ typedef u64 __nocast cputime64_t;
 #define cmpxchg_cputime(ptr, old, new) cmpxchg(ptr, old, new)
 
 #ifdef __KERNEL__
-
-/*
- * One jiffy in timebase units computed during initialization
- */
-extern cputime_t cputime_one_jiffy;
-
-/*
- * Convert cputime <-> jiffies
- */
-extern u64 __cputime_jiffies_factor;
-
-static inline unsigned long cputime_to_jiffies(const cputime_t ct)
-{
-       return mulhdu((__force u64) ct, __cputime_jiffies_factor);
-}
-
-static inline cputime_t jiffies_to_cputime(const unsigned long jif)
-{
-       u64 ct;
-       unsigned long sec;
-
-       /* have to be a little careful about overflow */
-       ct = jif % HZ;
-       sec = jif / HZ;
-       if (ct) {
-               ct *= tb_ticks_per_sec;
-               do_div(ct, HZ);
-       }
-       if (sec)
-               ct += (cputime_t) sec * tb_ticks_per_sec;
-       return (__force cputime_t) ct;
-}
-
-static inline void setup_cputime_one_jiffy(void)
-{
-       cputime_one_jiffy = jiffies_to_cputime(1);
-}
-
-static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
-{
-       u64 ct;
-       u64 sec = jif;
-
-       /* have to be a little careful about overflow */
-       ct = do_div(sec, HZ);
-       if (ct) {
-               ct *= tb_ticks_per_sec;
-               do_div(ct, HZ);
-       }
-       if (sec)
-               ct += (u64) sec * tb_ticks_per_sec;
-       return (__force cputime64_t) ct;
-}
-
-static inline u64 cputime64_to_jiffies64(const cputime_t ct)
-{
-       return mulhdu((__force u64) ct, __cputime_jiffies_factor);
-}
-
 /*
  * Convert cputime <-> microseconds
  */
@@ -105,117 +41,6 @@ static inline unsigned long cputime_to_usecs(const cputime_t ct)
        return mulhdu((__force u64) ct, __cputime_usec_factor);
 }
 
-static inline cputime_t usecs_to_cputime(const unsigned long us)
-{
-       u64 ct;
-       unsigned long sec;
-
-       /* have to be a little careful about overflow */
-       ct = us % 1000000;
-       sec = us / 1000000;
-       if (ct) {
-               ct *= tb_ticks_per_sec;
-               do_div(ct, 1000000);
-       }
-       if (sec)
-               ct += (cputime_t) sec * tb_ticks_per_sec;
-       return (__force cputime_t) ct;
-}
-
-#define usecs_to_cputime64(us)         usecs_to_cputime(us)
-
-/*
- * Convert cputime <-> seconds
- */
-extern u64 __cputime_sec_factor;
-
-static inline unsigned long cputime_to_secs(const cputime_t ct)
-{
-       return mulhdu((__force u64) ct, __cputime_sec_factor);
-}
-
-static inline cputime_t secs_to_cputime(const unsigned long sec)
-{
-       return (__force cputime_t)((u64) sec * tb_ticks_per_sec);
-}
-
-/*
- * Convert cputime <-> timespec
- */
-static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p)
-{
-       u64 x = (__force u64) ct;
-       unsigned int frac;
-
-       frac = do_div(x, tb_ticks_per_sec);
-       p->tv_sec = x;
-       x = (u64) frac * 1000000000;
-       do_div(x, tb_ticks_per_sec);
-       p->tv_nsec = x;
-}
-
-static inline cputime_t timespec_to_cputime(const struct timespec *p)
-{
-       u64 ct;
-
-       ct = (u64) p->tv_nsec * tb_ticks_per_sec;
-       do_div(ct, 1000000000);
-       return (__force cputime_t)(ct + (u64) p->tv_sec * tb_ticks_per_sec);
-}
-
-/*
- * Convert cputime <-> timeval
- */
-static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p)
-{
-       u64 x = (__force u64) ct;
-       unsigned int frac;
-
-       frac = do_div(x, tb_ticks_per_sec);
-       p->tv_sec = x;
-       x = (u64) frac * 1000000;
-       do_div(x, tb_ticks_per_sec);
-       p->tv_usec = x;
-}
-
-static inline cputime_t timeval_to_cputime(const struct timeval *p)
-{
-       u64 ct;
-
-       ct = (u64) p->tv_usec * tb_ticks_per_sec;
-       do_div(ct, 1000000);
-       return (__force cputime_t)(ct + (u64) p->tv_sec * tb_ticks_per_sec);
-}
-
-/*
- * Convert cputime <-> clock_t (units of 1/USER_HZ seconds)
- */
-extern u64 __cputime_clockt_factor;
-
-static inline unsigned long cputime_to_clock_t(const cputime_t ct)
-{
-       return mulhdu((__force u64) ct, __cputime_clockt_factor);
-}
-
-static inline cputime_t clock_t_to_cputime(const unsigned long clk)
-{
-       u64 ct;
-       unsigned long sec;
-
-       /* have to be a little careful about overflow */
-       ct = clk % USER_HZ;
-       sec = clk / USER_HZ;
-       if (ct) {
-               ct *= tb_ticks_per_sec;
-               do_div(ct, USER_HZ);
-       }
-       if (sec)
-               ct += (u64) sec * tb_ticks_per_sec;
-       return (__force cputime_t) ct;
-}
-
-#define cputime64_to_clock_t(ct)       cputime_to_clock_t((cputime_t)(ct))
-
 /*
  * PPC64 uses PACA which is task independent for storing accounting data while
  * PPC32 uses struct thread_info, therefore at task switch the accounting data
index a402f7f948965f1f28d4c3f2385001c1ecdef2b6..47a03b9b528b46672ece81b641ebecc13247c817 100644 (file)
@@ -28,13 +28,6 @@ static inline int klp_check_compiler_support(void)
        return 0;
 }
 
-static inline int klp_write_module_reloc(struct module *mod, unsigned long
-               type, unsigned long loc, unsigned long value)
-{
-       /* This requires infrastructure changes; we need the loadinfos. */
-       return -ENOSYS;
-}
-
 static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
 {
        regs->nip = ip;
index a34c764ca8dd83435faf75307e30e5149e55de4b..233a7e8cc8e32d6cf0ac904b3f02b2f340e3883b 100644 (file)
@@ -160,7 +160,9 @@ static __always_inline bool mmu_has_feature(unsigned long feature)
 {
        int i;
 
+#ifndef __clang__ /* clang can't cope with this */
        BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif
 
 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
        if (!static_key_initialized) {
index cc12c61ef315fc6ca5d43233bd54889cd19a01a0..53885512b8d31b12acec28dc3ba6688e57fb9615 100644 (file)
@@ -90,9 +90,5 @@ static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sec
 }
 #endif
 
-#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
-#define ARCH_RELOCATES_KCRCTAB
-#define reloc_start PHYSICAL_START
-#endif
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_MODULE_H */
index 6a6792bb39fbc8616034732322bd497ed30c5961..708c3e592eeb101ce25f8bcd12cceb95e7cf8825 100644 (file)
@@ -187,7 +187,6 @@ struct paca_struct {
 
        /* Stuff for accurate time accounting */
        struct cpu_accounting_data accounting;
-       u64 stolen_time;                /* TB ticks taken by hypervisor */
        u64 dtl_ridx;                   /* read index in dispatch log */
        struct dtl_entry *dtl_curr;     /* pointer corresponding to dtl_ridx */
 
index 0d4531aa2052d77fb8036b05f3550c33cfdde49e..dff79798903da7edc9dbc6e440fa97bbe9afe53f 100644 (file)
 #define   SRR1_ISI_N_OR_G      0x10000000 /* ISI: Access is no-exec or G */
 #define   SRR1_ISI_PROT                0x08000000 /* ISI: Other protection fault */
 #define   SRR1_WAKEMASK                0x00380000 /* reason for wakeup */
-#define   SRR1_WAKEMASK_P8     0x003c0000 /* reason for wakeup on POWER8 */
+#define   SRR1_WAKEMASK_P8     0x003c0000 /* reason for wakeup on POWER8 and 9 */
 #define   SRR1_WAKESYSERR      0x00300000 /* System error */
 #define   SRR1_WAKEEE          0x00200000 /* External interrupt */
+#define   SRR1_WAKEHVI         0x00240000 /* Hypervisor Virtualization Interrupt (P9) */
 #define   SRR1_WAKEMT          0x00280000 /* mtctrl */
 #define          SRR1_WAKEHMI          0x00280000 /* Hypervisor maintenance */
 #define   SRR1_WAKEDEC         0x00180000 /* Decrementer interrupt */
diff --git a/arch/powerpc/include/asm/stackprotector.h b/arch/powerpc/include/asm/stackprotector.h
deleted file mode 100644 (file)
index 6720190..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * GCC stack protector support.
- *
- * Stack protector works by putting predefined pattern at the start of
- * the stack frame and verifying that it hasn't been overwritten when
- * returning from the function.  The pattern is called stack canary
- * and gcc expects it to be defined by a global variable called
- * "__stack_chk_guard" on PPC.  This unfortunately means that on SMP
- * we cannot have a different canary value per task.
- */
-
-#ifndef _ASM_STACKPROTECTOR_H
-#define _ASM_STACKPROTECTOR_H
-
-#include <linux/random.h>
-#include <linux/version.h>
-#include <asm/reg.h>
-
-extern unsigned long __stack_chk_guard;
-
-/*
- * Initialize the stackprotector canary value.
- *
- * NOTE: this must only be called from functions that never return,
- * and it must always be inlined.
- */
-static __always_inline void boot_init_stack_canary(void)
-{
-       unsigned long canary;
-
-       /* Try to get a semi random initial value. */
-       get_random_bytes(&canary, sizeof(canary));
-       canary ^= mftb();
-       canary ^= LINUX_VERSION_CODE;
-
-       current->stack_canary = canary;
-       __stack_chk_guard = current->stack_canary;
-}
-
-#endif /* _ASM_STACKPROTECTOR_H */
index f0b238516e9b44b5afabc52a8460daaa6e81ca97..e0b9e576905aa2e1c24060db56de70f8b02ea920 100644 (file)
@@ -44,6 +44,7 @@ static inline int icp_hv_init(void) { return -ENODEV; }
 
 #ifdef CONFIG_PPC_POWERNV
 extern int icp_opal_init(void);
+extern void icp_opal_flush_interrupt(void);
 #else
 static inline int icp_opal_init(void) { return -ENODEV; }
 #endif
index 23f8082d7bfad95f4c9fbb8201e3e58c3104928f..f4c2b52e58b36eb44bcb6be2a2428bc0458f7660 100644 (file)
@@ -19,10 +19,6 @@ CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 
-# -fstack-protector triggers protection checks in this code,
-# but it is being used too early to link to meaningful stack_chk logic.
-CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector)
-
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
 CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
index 0601e6a7297c64ea4b2129011d32ae42a662ac07..9e8e771f8acb46684d233a602c87665f156ebfbe 100644 (file)
@@ -91,9 +91,6 @@ int main(void)
        DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp));
 #endif
 
-#ifdef CONFIG_CC_STACKPROTECTOR
-       DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
-#endif
        DEFINE(KSP, offsetof(struct thread_struct, ksp));
        DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
 #ifdef CONFIG_BOOKE
@@ -252,9 +249,9 @@ int main(void)
        DEFINE(ACCOUNT_STARTTIME_USER,
               offsetof(struct paca_struct, accounting.starttime_user));
        DEFINE(ACCOUNT_USER_TIME,
-              offsetof(struct paca_struct, accounting.user_time));
+              offsetof(struct paca_struct, accounting.utime));
        DEFINE(ACCOUNT_SYSTEM_TIME,
-              offsetof(struct paca_struct, accounting.system_time));
+              offsetof(struct paca_struct, accounting.stime));
        DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
        DEFINE(PACA_NAPSTATELOST, offsetof(struct paca_struct, nap_state_lost));
        DEFINE(PACA_SPRG_VDSO, offsetof(struct paca_struct, sprg_vdso));
@@ -265,9 +262,9 @@ int main(void)
        DEFINE(ACCOUNT_STARTTIME_USER,
               offsetof(struct thread_info, accounting.starttime_user));
        DEFINE(ACCOUNT_USER_TIME,
-              offsetof(struct thread_info, accounting.user_time));
+              offsetof(struct thread_info, accounting.utime));
        DEFINE(ACCOUNT_SYSTEM_TIME,
-              offsetof(struct thread_info, accounting.system_time));
+              offsetof(struct thread_info, accounting.stime));
 #endif
 #endif /* CONFIG_PPC64 */
 
index d88573bdd0907c6682cf03395bc296155cea9124..b94887165a101557c97fd6a53139ae7df67dbbd9 100644 (file)
@@ -545,7 +545,7 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
 static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
 {
        struct eeh_pe *pe = (struct eeh_pe *)data;
-       bool *clear_sw_state = flag;
+       bool clear_sw_state = *(bool *)flag;
        int i, rc = 1;
 
        for (i = 0; rc && i < 3; i++)
index 5742dbdbee4677924ebf0019b891e43879410131..3841d749a430069f4d4f2705c4199c08609b3757 100644 (file)
@@ -674,11 +674,7 @@ BEGIN_FTR_SECTION
        mtspr   SPRN_SPEFSCR,r0         /* restore SPEFSCR reg */
 END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 #endif /* CONFIG_SPE */
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
-       lwz     r0,TSK_STACK_CANARY(r2)
-       lis     r4,__stack_chk_guard@ha
-       stw     r0,__stack_chk_guard@l(r4)
-#endif
+
        lwz     r0,_CCR(r1)
        mtcrf   0xFF,r0
        /* r3-r12 are destroyed -- Cort */
index bb1807184bad5da5f9b65ce69087184e67534c03..0b0f89685b679745895251b011d0db522d4558e1 100644 (file)
@@ -286,14 +286,6 @@ static void dedotify_versions(struct modversion_info *vers,
        for (end = (void *)vers + size; vers < end; vers++)
                if (vers->name[0] == '.') {
                        memmove(vers->name, vers->name+1, strlen(vers->name));
-#ifdef ARCH_RELOCATES_KCRCTAB
-                       /* The TOC symbol has no CRC computed. To avoid CRC
-                        * check failing, we must force it to the expected
-                        * value (see CRC check in module.c).
-                        */
-                       if (!strcmp(vers->name, "TOC."))
-                               vers->crc = -(unsigned long)reloc_start;
-#endif
                }
 }
 
index 04885cec24df1413f90121115cf6569e4aa444e6..5dd056df0baaec576431adb4cbe8ab370d8f44c1 100644 (file)
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
 
-#ifdef CONFIG_CC_STACKPROTECTOR
-#include <linux/stackprotector.h>
-unsigned long __stack_chk_guard __read_mostly;
-EXPORT_SYMBOL(__stack_chk_guard);
-#endif
-
 /* Transactional Memory debug */
 #ifdef TM_DEBUG_SW
 #define TM_DEBUG(x...) printk(KERN_INFO x)
index ec47a939cbdd6dd81c6c05ed6707f28e12d9f0ea..ac83eb04a8b871293c53e7bd6ff4d439b89704a9 100644 (file)
@@ -2834,6 +2834,9 @@ static void __init prom_find_boot_cpu(void)
 
        cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
 
+       if (!PHANDLE_VALID(cpu_pkg))
+               return;
+
        prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
        prom.cpu = be32_to_cpu(rval);
 
index bc2e08d415fa3af8fea6d6f3e63b75e2900c78ff..14e485525e317907c52d0183c88d8887cc77ff97 100644 (file)
@@ -57,6 +57,7 @@
 #include <linux/clk-provider.h>
 #include <linux/suspend.h>
 #include <linux/rtc.h>
+#include <linux/cputime.h>
 #include <asm/trace.h>
 
 #include <asm/io.h>
@@ -72,7 +73,6 @@
 #include <asm/smp.h>
 #include <asm/vdso_datapage.h>
 #include <asm/firmware.h>
-#include <asm/cputime.h>
 #include <asm/asm-prototypes.h>
 
 /* powerpc clocksource/clockevent code */
@@ -152,20 +152,11 @@ EXPORT_SYMBOL_GPL(ppc_tb_freq);
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 /*
- * Factors for converting from cputime_t (timebase ticks) to
- * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds).
- * These are all stored as 0.64 fixed-point binary fractions.
+ * Factor for converting from cputime_t (timebase ticks) to
+ * microseconds. This is stored as 0.64 fixed-point binary fraction.
  */
-u64 __cputime_jiffies_factor;
-EXPORT_SYMBOL(__cputime_jiffies_factor);
 u64 __cputime_usec_factor;
 EXPORT_SYMBOL(__cputime_usec_factor);
-u64 __cputime_sec_factor;
-EXPORT_SYMBOL(__cputime_sec_factor);
-u64 __cputime_clockt_factor;
-EXPORT_SYMBOL(__cputime_clockt_factor);
-
-cputime_t cputime_one_jiffy;
 
 #ifdef CONFIG_PPC_SPLPAR
 void (*dtl_consumer)(struct dtl_entry *, u64);
@@ -181,14 +172,8 @@ static void calc_cputime_factors(void)
 {
        struct div_result res;
 
-       div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
-       __cputime_jiffies_factor = res.result_low;
        div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
        __cputime_usec_factor = res.result_low;
-       div128_by_32(1, 0, tb_ticks_per_sec, &res);
-       __cputime_sec_factor = res.result_low;
-       div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
-       __cputime_clockt_factor = res.result_low;
 }
 
 /*
@@ -271,25 +256,19 @@ void accumulate_stolen_time(void)
 
        sst = scan_dispatch_log(acct->starttime_user);
        ust = scan_dispatch_log(acct->starttime);
-       acct->system_time -= sst;
-       acct->user_time -= ust;
-       local_paca->stolen_time += ust + sst;
+       acct->stime -= sst;
+       acct->utime -= ust;
+       acct->steal_time += ust + sst;
 
        local_paca->soft_enabled = save_soft_enabled;
 }
 
 static inline u64 calculate_stolen_time(u64 stop_tb)
 {
-       u64 stolen = 0;
+       if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx))
+               return scan_dispatch_log(stop_tb);
 
-       if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx)) {
-               stolen = scan_dispatch_log(stop_tb);
-               get_paca()->accounting.system_time -= stolen;
-       }
-
-       stolen += get_paca()->stolen_time;
-       get_paca()->stolen_time = 0;
-       return stolen;
+       return 0;
 }
 
 #else /* CONFIG_PPC_SPLPAR */
@@ -305,28 +284,27 @@ static inline u64 calculate_stolen_time(u64 stop_tb)
  * or soft irq state.
  */
 static unsigned long vtime_delta(struct task_struct *tsk,
-                                unsigned long *sys_scaled,
-                                unsigned long *stolen)
+                                unsigned long *stime_scaled,
+                                unsigned long *steal_time)
 {
        unsigned long now, nowscaled, deltascaled;
-       unsigned long udelta, delta, user_scaled;
+       unsigned long stime;
+       unsigned long utime, utime_scaled;
        struct cpu_accounting_data *acct = get_accounting(tsk);
 
        WARN_ON_ONCE(!irqs_disabled());
 
        now = mftb();
        nowscaled = read_spurr(now);
-       acct->system_time += now - acct->starttime;
+       stime = now - acct->starttime;
        acct->starttime = now;
        deltascaled = nowscaled - acct->startspurr;
        acct->startspurr = nowscaled;
 
-       *stolen = calculate_stolen_time(now);
+       *steal_time = calculate_stolen_time(now);
 
-       delta = acct->system_time;
-       acct->system_time = 0;
-       udelta = acct->user_time - acct->utime_sspurr;
-       acct->utime_sspurr = acct->user_time;
+       utime = acct->utime - acct->utime_sspurr;
+       acct->utime_sspurr = acct->utime;
 
        /*
         * Because we don't read the SPURR on every kernel entry/exit,
@@ -338,62 +316,105 @@ static unsigned long vtime_delta(struct task_struct *tsk,
         * the user ticks get saved up in paca->user_time_scaled to be
         * used by account_process_tick.
         */
-       *sys_scaled = delta;
-       user_scaled = udelta;
-       if (deltascaled != delta + udelta) {
-               if (udelta) {
-                       *sys_scaled = deltascaled * delta / (delta + udelta);
-                       user_scaled = deltascaled - *sys_scaled;
+       *stime_scaled = stime;
+       utime_scaled = utime;
+       if (deltascaled != stime + utime) {
+               if (utime) {
+                       *stime_scaled = deltascaled * stime / (stime + utime);
+                       utime_scaled = deltascaled - *stime_scaled;
                } else {
-                       *sys_scaled = deltascaled;
+                       *stime_scaled = deltascaled;
                }
        }
-       acct->user_time_scaled += user_scaled;
+       acct->utime_scaled += utime_scaled;
 
-       return delta;
+       return stime;
 }
 
 void vtime_account_system(struct task_struct *tsk)
 {
-       unsigned long delta, sys_scaled, stolen;
+       unsigned long stime, stime_scaled, steal_time;
+       struct cpu_accounting_data *acct = get_accounting(tsk);
+
+       stime = vtime_delta(tsk, &stime_scaled, &steal_time);
 
-       delta = vtime_delta(tsk, &sys_scaled, &stolen);
-       account_system_time(tsk, 0, delta);
-       tsk->stimescaled += sys_scaled;
-       if (stolen)
-               account_steal_time(stolen);
+       stime -= min(stime, steal_time);
+       acct->steal_time += steal_time;
+
+       if ((tsk->flags & PF_VCPU) && !irq_count()) {
+               acct->gtime += stime;
+               acct->utime_scaled += stime_scaled;
+       } else {
+               if (hardirq_count())
+                       acct->hardirq_time += stime;
+               else if (in_serving_softirq())
+                       acct->softirq_time += stime;
+               else
+                       acct->stime += stime;
+
+               acct->stime_scaled += stime_scaled;
+       }
 }
 EXPORT_SYMBOL_GPL(vtime_account_system);
 
 void vtime_account_idle(struct task_struct *tsk)
 {
-       unsigned long delta, sys_scaled, stolen;
+       unsigned long stime, stime_scaled, steal_time;
+       struct cpu_accounting_data *acct = get_accounting(tsk);
 
-       delta = vtime_delta(tsk, &sys_scaled, &stolen);
-       account_idle_time(delta + stolen);
+       stime = vtime_delta(tsk, &stime_scaled, &steal_time);
+       acct->idle_time += stime + steal_time;
 }
 
 /*
- * Transfer the user time accumulated in the paca
- * by the exception entry and exit code to the generic
- * process user time records.
+ * Account the whole cputime accumulated in the paca
  * Must be called with interrupts disabled.
  * Assumes that vtime_account_system/idle() has been called
  * recently (i.e. since the last entry from usermode) so that
  * get_paca()->user_time_scaled is up to date.
  */
-void vtime_account_user(struct task_struct *tsk)
+void vtime_flush(struct task_struct *tsk)
 {
-       cputime_t utime, utimescaled;
        struct cpu_accounting_data *acct = get_accounting(tsk);
 
-       utime = acct->user_time;
-       utimescaled = acct->user_time_scaled;
-       acct->user_time = 0;
-       acct->user_time_scaled = 0;
+       if (acct->utime)
+               account_user_time(tsk, cputime_to_nsecs(acct->utime));
+
+       if (acct->utime_scaled)
+               tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled);
+
+       if (acct->gtime)
+               account_guest_time(tsk, cputime_to_nsecs(acct->gtime));
+
+       if (acct->steal_time)
+               account_steal_time(cputime_to_nsecs(acct->steal_time));
+
+       if (acct->idle_time)
+               account_idle_time(cputime_to_nsecs(acct->idle_time));
+
+       if (acct->stime)
+               account_system_index_time(tsk, cputime_to_nsecs(acct->stime),
+                                         CPUTIME_SYSTEM);
+       if (acct->stime_scaled)
+               tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled);
+
+       if (acct->hardirq_time)
+               account_system_index_time(tsk, cputime_to_nsecs(acct->hardirq_time),
+                                         CPUTIME_IRQ);
+       if (acct->softirq_time)
+               account_system_index_time(tsk, cputime_to_nsecs(acct->softirq_time),
+                                         CPUTIME_SOFTIRQ);
+
+       acct->utime = 0;
+       acct->utime_scaled = 0;
        acct->utime_sspurr = 0;
-       account_user_time(tsk, utime);
-       tsk->utimescaled += utimescaled;
+       acct->gtime = 0;
+       acct->steal_time = 0;
+       acct->idle_time = 0;
+       acct->stime = 0;
+       acct->stime_scaled = 0;
+       acct->hardirq_time = 0;
+       acct->softirq_time = 0;
 }
 
 #ifdef CONFIG_PPC32
@@ -407,8 +428,7 @@ void arch_vtime_task_switch(struct task_struct *prev)
        struct cpu_accounting_data *acct = get_accounting(current);
 
        acct->starttime = get_accounting(prev)->starttime;
-       acct->system_time = 0;
-       acct->user_time = 0;
+       acct->startspurr = get_accounting(prev)->startspurr;
 }
 #endif /* CONFIG_PPC32 */
 
@@ -1018,7 +1038,6 @@ void __init time_init(void)
        tb_ticks_per_sec = ppc_tb_freq;
        tb_ticks_per_usec = ppc_tb_freq / 1000000;
        calc_cputime_factors();
-       setup_cputime_one_jiffy();
 
        /*
         * Compute scale factor for sched_clock.
index 6fd30ac7d14a0d2761e82d479fe01724bd53a38d..62a50d6d1053c9f1e3854ecb9de877e0f02cbb3b 100644 (file)
@@ -253,8 +253,11 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
        if (unlikely(debugger_fault_handler(regs)))
                goto bail;
 
-       /* On a kernel SLB miss we can only check for a valid exception entry */
-       if (!user_mode(regs) && (address >= TASK_SIZE)) {
+       /*
+        * The kernel should never take an execute fault nor should it
+        * take a page fault to a kernel address.
+        */
+       if (!user_mode(regs) && (is_exec || (address >= TASK_SIZE))) {
                rc = SIGSEGV;
                goto bail;
        }
@@ -390,20 +393,6 @@ good_area:
 #endif /* CONFIG_8xx */
 
        if (is_exec) {
-               /*
-                * An execution fault + no execute ?
-                *
-                * On CPUs that don't have CPU_FTR_COHERENT_ICACHE we
-                * deliberately create NX mappings, and use the fault to do the
-                * cache flush. This is usually handled in hash_page_do_lazy_icache()
-                * but we could end up here if that races with a concurrent PTE
-                * update. In that case we need to fall through here to the VMA
-                * check below.
-                */
-               if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
-                       (regs->msr & SRR1_ISI_N_OR_G))
-                       goto bad_area;
-
                /*
                 * Allow execution from readable areas if the MMU does not
                 * provide separate controls over reading and executing.
index 93abf8a9813d5c2d47a1aa150cafa4076111b0db..8e1588021d1c90d37073d91962d8822777a152fe 100644 (file)
@@ -347,7 +347,8 @@ early_param("disable_radix", parse_disable_radix);
 void __init mmu_early_init_devtree(void)
 {
        /* Disable radix mode based on kernel command line. */
-       if (disable_radix)
+       /* We don't yet have the machinery to do radix as a guest. */
+       if (disable_radix || !(mfmsr() & MSR_HV))
                cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
 
        if (early_radix_enabled())
index cfa53ccc8bafc908e80532a4a64e44ad358dffc7..34f1a0dbc898ee4a28a6adcd41c0ce5d3fe61198 100644 (file)
@@ -65,7 +65,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
                if (!pmdp)
                        return -ENOMEM;
                if (map_page_size == PMD_SIZE) {
-                       ptep = (pte_t *)pudp;
+                       ptep = pmdp_ptep(pmdp);
                        goto set_the_pte;
                }
                ptep = pte_alloc_kernel(pmdp, ea);
@@ -90,7 +90,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
                }
                pmdp = pmd_offset(pudp, ea);
                if (map_page_size == PMD_SIZE) {
-                       ptep = (pte_t *)pudp;
+                       ptep = pmdp_ptep(pmdp);
                        goto set_the_pte;
                }
                if (!pmd_present(*pmdp)) {
index 61b79119065ff3030162df12446a67c712cd6649..952713d6cf04d0e826a685f181561d587df41257 100644 (file)
@@ -50,9 +50,7 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
        for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
                __tlbiel_pid(pid, set, ric);
        }
-       if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-               asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
-       return;
+       asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
 }
 
 static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
@@ -85,8 +83,6 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
        asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        asm volatile("ptesync": : :"memory");
-       if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-               asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
 }
 
 static inline void _tlbie_va(unsigned long va, unsigned long pid,
index c789258ae1e1ce656be3e4c1bd1a9b28523f307e..eec0e8d0454d11859e83aeef105a07010b1b9aac 100644 (file)
@@ -155,8 +155,10 @@ static void pnv_smp_cpu_kill_self(void)
                wmask = SRR1_WAKEMASK_P8;
 
        idle_states = pnv_get_supported_cpuidle_states();
+
        /* We don't want to take decrementer interrupts while we are offline,
-        * so clear LPCR:PECE1. We keep PECE2 enabled.
+        * so clear LPCR:PECE1. We keep PECE2 (and LPCR_PECE_HVEE on P9)
+        * enabled as to let IPIs in.
         */
        mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
 
@@ -206,8 +208,12 @@ static void pnv_smp_cpu_kill_self(void)
                 * contains 0.
                 */
                if (((srr1 & wmask) == SRR1_WAKEEE) ||
+                   ((srr1 & wmask) == SRR1_WAKEHVI) ||
                    (local_paca->irq_happened & PACA_IRQ_EE)) {
-                       icp_native_flush_interrupt();
+                       if (cpu_has_feature(CPU_FTR_ARCH_300))
+                               icp_opal_flush_interrupt();
+                       else
+                               icp_native_flush_interrupt();
                } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
                        unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
                        asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
@@ -221,6 +227,8 @@ static void pnv_smp_cpu_kill_self(void)
                if (srr1 && !generic_check_cpu_restart(cpu))
                        DBG("CPU%d Unexpected exit while offline !\n", cpu);
        }
+
+       /* Re-enable decrementer interrupts */
        mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
        DBG("CPU%d coming online...\n", cpu);
 }
index 60c57657c772fef576e5c4703dfb2a17203978a7..f9670eabfcfa70ca338aa0c5f2e10217803c7162 100644 (file)
@@ -120,18 +120,49 @@ static void icp_opal_cause_ipi(int cpu, unsigned long data)
 {
        int hw_cpu = get_hard_smp_processor_id(cpu);
 
+       kvmppc_set_host_ipi(cpu, 1);
        opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
 }
 
 static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
 {
-       int hw_cpu = hard_smp_processor_id();
+       int cpu = smp_processor_id();
 
-       opal_int_set_mfrr(hw_cpu, 0xff);
+       kvmppc_set_host_ipi(cpu, 0);
+       opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
 
        return smp_ipi_demux();
 }
 
+/*
+ * Called when an interrupt is received on an off-line CPU to
+ * clear the interrupt, so that the CPU can go back to nap mode.
+ */
+void icp_opal_flush_interrupt(void)
+{
+       unsigned int xirr;
+       unsigned int vec;
+
+       do {
+               xirr = icp_opal_get_xirr();
+               vec = xirr & 0x00ffffff;
+               if (vec == XICS_IRQ_SPURIOUS)
+                       break;
+               if (vec == XICS_IPI) {
+                       /* Clear pending IPI */
+                       int cpu = smp_processor_id();
+                       kvmppc_set_host_ipi(cpu, 0);
+                       opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
+               } else {
+                       pr_err("XICS: hw interrupt 0x%x to offline cpu, "
+                              "disabling\n", vec);
+                       xics_mask_unknown_vec(vec);
+               }
+
+               /* EOI the interrupt */
+       } while (opal_int_eoi(xirr) > 0);
+}
+
 #endif /* CONFIG_SMP */
 
 static const struct icp_ops icp_opal_ops = {
index 9c0e17cf6886cdea0ad539f1c53ecbd7a66c502b..3f864c36d8478751a96f06e0483fb5b10186e014 100644 (file)
@@ -2287,14 +2287,14 @@ static void dump_one_paca(int cpu)
        DUMP(p, subcore_sibling_mask, "x");
 #endif
 
-       DUMP(p, accounting.user_time, "llx");
-       DUMP(p, accounting.system_time, "llx");
-       DUMP(p, accounting.user_time_scaled, "llx");
+       DUMP(p, accounting.utime, "llx");
+       DUMP(p, accounting.stime, "llx");
+       DUMP(p, accounting.utime_scaled, "llx");
        DUMP(p, accounting.starttime, "llx");
        DUMP(p, accounting.starttime_user, "llx");
        DUMP(p, accounting.startspurr, "llx");
        DUMP(p, accounting.utime_sspurr, "llx");
-       DUMP(p, stolen_time, "llx");
+       DUMP(p, accounting.steal_time, "llx");
 #undef DUMP
 
        catch_memory_errors = 0;
index 69b23b25ac34a4f3f6c6faeb305648ef5a7e5b63..08b9e942a262eda28393f5d982f4f8d60093ca4b 100644 (file)
@@ -113,21 +113,21 @@ static void appldata_get_os_data(void *data)
        j = 0;
        for_each_online_cpu(i) {
                os_data->os_cpu[j].per_cpu_user =
-                       cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]);
+                       nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]);
                os_data->os_cpu[j].per_cpu_nice =
-                       cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]);
+                       nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]);
                os_data->os_cpu[j].per_cpu_system =
-                       cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]);
+                       nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]);
                os_data->os_cpu[j].per_cpu_idle =
-                       cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]);
+                       nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]);
                os_data->os_cpu[j].per_cpu_irq =
-                       cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]);
+                       nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]);
                os_data->os_cpu[j].per_cpu_softirq =
-                       cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]);
+                       nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]);
                os_data->os_cpu[j].per_cpu_iowait =
-                       cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]);
+                       nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]);
                os_data->os_cpu[j].per_cpu_steal =
-                       cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]);
+                       nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]);
                os_data->os_cpu[j].cpu_id = i;
                j++;
        }
index 221b454c734a24481c02aee8e0168d0e11f1fec7..d1c407ddf7032de5a43d08aa48438abda7ab1e91 100644 (file)
@@ -25,33 +25,6 @@ static inline unsigned long __div(unsigned long long n, unsigned long base)
        return n / base;
 }
 
-#define cputime_one_jiffy              jiffies_to_cputime(1)
-
-/*
- * Convert cputime to jiffies and back.
- */
-static inline unsigned long cputime_to_jiffies(const cputime_t cputime)
-{
-       return __div((__force unsigned long long) cputime, CPUTIME_PER_SEC / HZ);
-}
-
-static inline cputime_t jiffies_to_cputime(const unsigned int jif)
-{
-       return (__force cputime_t)(jif * (CPUTIME_PER_SEC / HZ));
-}
-
-static inline u64 cputime64_to_jiffies64(cputime64_t cputime)
-{
-       unsigned long long jif = (__force unsigned long long) cputime;
-       do_div(jif, CPUTIME_PER_SEC / HZ);
-       return jif;
-}
-
-static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
-{
-       return (__force cputime64_t)(jif * (CPUTIME_PER_SEC / HZ));
-}
-
 /*
  * Convert cputime to microseconds and back.
  */
@@ -60,88 +33,8 @@ static inline unsigned int cputime_to_usecs(const cputime_t cputime)
        return (__force unsigned long long) cputime >> 12;
 }
 
-static inline cputime_t usecs_to_cputime(const unsigned int m)
-{
-       return (__force cputime_t)(m * CPUTIME_PER_USEC);
-}
-
-#define usecs_to_cputime64(m)          usecs_to_cputime(m)
-
-/*
- * Convert cputime to milliseconds and back.
- */
-static inline unsigned int cputime_to_secs(const cputime_t cputime)
-{
-       return __div((__force unsigned long long) cputime, CPUTIME_PER_SEC / 2) >> 1;
-}
-
-static inline cputime_t secs_to_cputime(const unsigned int s)
-{
-       return (__force cputime_t)(s * CPUTIME_PER_SEC);
-}
-
-/*
- * Convert cputime to timespec and back.
- */
-static inline cputime_t timespec_to_cputime(const struct timespec *value)
-{
-       unsigned long long ret = value->tv_sec * CPUTIME_PER_SEC;
-       return (__force cputime_t)(ret + __div(value->tv_nsec * CPUTIME_PER_USEC, NSEC_PER_USEC));
-}
-
-static inline void cputime_to_timespec(const cputime_t cputime,
-                                      struct timespec *value)
-{
-       unsigned long long __cputime = (__force unsigned long long) cputime;
-       value->tv_nsec = (__cputime % CPUTIME_PER_SEC) * NSEC_PER_USEC / CPUTIME_PER_USEC;
-       value->tv_sec = __cputime / CPUTIME_PER_SEC;
-}
-
-/*
- * Convert cputime to timeval and back.
- * Since cputime and timeval have the same resolution (microseconds)
- * this is easy.
- */
-static inline cputime_t timeval_to_cputime(const struct timeval *value)
-{
-       unsigned long long ret = value->tv_sec * CPUTIME_PER_SEC;
-       return (__force cputime_t)(ret + value->tv_usec * CPUTIME_PER_USEC);
-}
-
-static inline void cputime_to_timeval(const cputime_t cputime,
-                                     struct timeval *value)
-{
-       unsigned long long __cputime = (__force unsigned long long) cputime;
-       value->tv_usec = (__cputime % CPUTIME_PER_SEC) / CPUTIME_PER_USEC;
-       value->tv_sec = __cputime / CPUTIME_PER_SEC;
-}
-
-/*
- * Convert cputime to clock and back.
- */
-static inline clock_t cputime_to_clock_t(cputime_t cputime)
-{
-       unsigned long long clock = (__force unsigned long long) cputime;
-       do_div(clock, CPUTIME_PER_SEC / USER_HZ);
-       return clock;
-}
-
-static inline cputime_t clock_t_to_cputime(unsigned long x)
-{
-       return (__force cputime_t)(x * (CPUTIME_PER_SEC / USER_HZ));
-}
-
-/*
- * Convert cputime64 to clock.
- */
-static inline clock_t cputime64_to_clock_t(cputime64_t cputime)
-{
-       unsigned long long clock = (__force unsigned long long) cputime;
-       do_div(clock, CPUTIME_PER_SEC / USER_HZ);
-       return clock;
-}
 
-cputime64_t arch_cpu_idle_time(int cpu);
+u64 arch_cpu_idle_time(int cpu);
 
 #define arch_idle_time(cpu) arch_cpu_idle_time(cpu)
 
index 9bfad2ad63129916b46471ed97525b255100da48..61261e0e95c069abb7c8cb2a310f608af9ee045e 100644 (file)
@@ -85,53 +85,56 @@ struct lowcore {
        __u64   mcck_enter_timer;               /* 0x02c0 */
        __u64   exit_timer;                     /* 0x02c8 */
        __u64   user_timer;                     /* 0x02d0 */
-       __u64   system_timer;                   /* 0x02d8 */
-       __u64   steal_timer;                    /* 0x02e0 */
-       __u64   last_update_timer;              /* 0x02e8 */
-       __u64   last_update_clock;              /* 0x02f0 */
-       __u64   int_clock;                      /* 0x02f8 */
-       __u64   mcck_clock;                     /* 0x0300 */
-       __u64   clock_comparator;               /* 0x0308 */
+       __u64   guest_timer;                    /* 0x02d8 */
+       __u64   system_timer;                   /* 0x02e0 */
+       __u64   hardirq_timer;                  /* 0x02e8 */
+       __u64   softirq_timer;                  /* 0x02f0 */
+       __u64   steal_timer;                    /* 0x02f8 */
+       __u64   last_update_timer;              /* 0x0300 */
+       __u64   last_update_clock;              /* 0x0308 */
+       __u64   int_clock;                      /* 0x0310 */
+       __u64   mcck_clock;                     /* 0x0318 */
+       __u64   clock_comparator;               /* 0x0320 */
 
        /* Current process. */
-       __u64   current_task;                   /* 0x0310 */
-       __u8    pad_0x318[0x320-0x318];         /* 0x0318 */
-       __u64   kernel_stack;                   /* 0x0320 */
+       __u64   current_task;                   /* 0x0328 */
+       __u8    pad_0x318[0x320-0x318];         /* 0x0330 */
+       __u64   kernel_stack;                   /* 0x0338 */
 
        /* Interrupt, panic and restart stack. */
-       __u64   async_stack;                    /* 0x0328 */
-       __u64   panic_stack;                    /* 0x0330 */
-       __u64   restart_stack;                  /* 0x0338 */
+       __u64   async_stack;                    /* 0x0340 */
+       __u64   panic_stack;                    /* 0x0348 */
+       __u64   restart_stack;                  /* 0x0350 */
 
        /* Restart function and parameter. */
-       __u64   restart_fn;                     /* 0x0340 */
-       __u64   restart_data;                   /* 0x0348 */
-       __u64   restart_source;                 /* 0x0350 */
+       __u64   restart_fn;                     /* 0x0358 */
+       __u64   restart_data;                   /* 0x0360 */
+       __u64   restart_source;                 /* 0x0368 */
 
        /* Address space pointer. */
-       __u64   kernel_asce;                    /* 0x0358 */
-       __u64   user_asce;                      /* 0x0360 */
+       __u64   kernel_asce;                    /* 0x0370 */
+       __u64   user_asce;                      /* 0x0378 */
 
        /*
         * The lpp and current_pid fields form a
         * 64-bit value that is set as program
         * parameter with the LPP instruction.
         */
-       __u32   lpp;                            /* 0x0368 */
-       __u32   current_pid;                    /* 0x036c */
+       __u32   lpp;                            /* 0x0380 */
+       __u32   current_pid;                    /* 0x0384 */
 
        /* SMP info area */
-       __u32   cpu_nr;                         /* 0x0370 */
-       __u32   softirq_pending;                /* 0x0374 */
-       __u64   percpu_offset;                  /* 0x0378 */
-       __u64   vdso_per_cpu_data;              /* 0x0380 */
-       __u64   machine_flags;                  /* 0x0388 */
-       __u32   preempt_count;                  /* 0x0390 */
-       __u8    pad_0x0394[0x0398-0x0394];      /* 0x0394 */
-       __u64   gmap;                           /* 0x0398 */
-       __u32   spinlock_lockval;               /* 0x03a0 */
-       __u32   fpu_flags;                      /* 0x03a4 */
-       __u8    pad_0x03a8[0x0400-0x03a8];      /* 0x03a8 */
+       __u32   cpu_nr;                         /* 0x0388 */
+       __u32   softirq_pending;                /* 0x038c */
+       __u64   percpu_offset;                  /* 0x0390 */
+       __u64   vdso_per_cpu_data;              /* 0x0398 */
+       __u64   machine_flags;                  /* 0x03a0 */
+       __u32   preempt_count;                  /* 0x03a8 */
+       __u8    pad_0x03ac[0x03b0-0x03ac];      /* 0x03ac */
+       __u64   gmap;                           /* 0x03b0 */
+       __u32   spinlock_lockval;               /* 0x03b8 */
+       __u32   fpu_flags;                      /* 0x03bc */
+       __u8    pad_0x03c0[0x0400-0x03c0];      /* 0x03c0 */
 
        /* Per cpu primary space access list */
        __u32   paste[16];                      /* 0x0400 */
index 6bca916a5ba05a272ecdc5e252616963581d46cc..977a5b6501b879a043cc3544f0c8fa299723293a 100644 (file)
@@ -111,7 +111,10 @@ struct thread_struct {
        unsigned int  acrs[NUM_ACRS];
         unsigned long ksp;              /* kernel stack pointer             */
        unsigned long user_timer;       /* task cputime in user space */
+       unsigned long guest_timer;      /* task cputime in kvm guest */
        unsigned long system_timer;     /* task cputime in kernel space */
+       unsigned long hardirq_timer;    /* task cputime in hardirq context */
+       unsigned long softirq_timer;    /* task cputime in softirq context */
        unsigned long sys_call_table;   /* system call table address */
        mm_segment_t mm_segment;
        unsigned long gmap_addr;        /* address of last gmap fault. */
index 7a55c29b0b33a8ab4bbdc0c1ca7d7ba2dacdc3ab..d3bf69ef42cff9d00fcd30c89fc0c10ee88037bc 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/notifier.h>
 #include <linux/init.h>
 #include <linux/cpu.h>
-#include <asm/cputime.h>
+#include <linux/cputime.h>
 #include <asm/nmi.h>
 #include <asm/smp.h>
 #include "entry.h"
@@ -43,7 +43,7 @@ void enabled_wait(void)
        idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
        idle->idle_time += idle_time;
        idle->idle_count++;
-       account_idle_time(idle_time);
+       account_idle_time(cputime_to_nsecs(idle_time));
        write_seqcount_end(&idle->seqcount);
 }
 NOKPROBE_SYMBOL(enabled_wait);
@@ -84,7 +84,7 @@ static ssize_t show_idle_time(struct device *dev,
 }
 DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
 
-cputime64_t arch_cpu_idle_time(int cpu)
+u64 arch_cpu_idle_time(int cpu)
 {
        struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
        unsigned long long now, idle_enter, idle_exit;
@@ -96,7 +96,8 @@ cputime64_t arch_cpu_idle_time(int cpu)
                idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
                idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
        } while (read_seqcount_retry(&idle->seqcount, seq));
-       return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
+
+       return cputime_to_nsecs(idle_enter ? ((idle_exit ?: now) - idle_enter) : 0);
 }
 
 void arch_cpu_idle_enter(void)
index 7447ba509c30eb0b409598d062ea408603f313af..12020b55887bfd258e6545e687ec4a9de4fdb214 100644 (file)
@@ -963,6 +963,11 @@ static int s390_fpregs_set(struct task_struct *target,
        if (target == current)
                save_fpu_regs();
 
+       if (MACHINE_HAS_VX)
+               convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
+       else
+               memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
+
        /* If setting FPC, must validate it first. */
        if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
                u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
@@ -1067,6 +1072,9 @@ static int s390_vxrs_low_set(struct task_struct *target,
        if (target == current)
                save_fpu_regs();
 
+       for (i = 0; i < __NUM_VXRS_LOW; i++)
+               vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
+
        rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
        if (rc == 0)
                for (i = 0; i < __NUM_VXRS_LOW; i++)
index 1b5c5ee9fc1b60878844cd67cb6a6cb12800a563..b4a3e9e06ef244967b440057a1ee46b05bdb0def 100644 (file)
@@ -6,13 +6,13 @@
  */
 
 #include <linux/kernel_stat.h>
+#include <linux/cputime.h>
 #include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/timex.h>
 #include <linux/types.h>
 #include <linux/time.h>
 
-#include <asm/cputime.h>
 #include <asm/vtimer.h>
 #include <asm/vtime.h>
 #include <asm/cpu_mf.h>
@@ -90,14 +90,41 @@ static void update_mt_scaling(void)
        __this_cpu_write(mt_scaling_jiffies, jiffies_64);
 }
 
+static inline u64 update_tsk_timer(unsigned long *tsk_vtime, u64 new)
+{
+       u64 delta;
+
+       delta = new - *tsk_vtime;
+       *tsk_vtime = new;
+       return delta;
+}
+
+
+static inline u64 scale_vtime(u64 vtime)
+{
+       u64 mult = __this_cpu_read(mt_scaling_mult);
+       u64 div = __this_cpu_read(mt_scaling_div);
+
+       if (smp_cpu_mtid)
+               return vtime * mult / div;
+       return vtime;
+}
+
+static void account_system_index_scaled(struct task_struct *p,
+                                       cputime_t cputime, cputime_t scaled,
+                                       enum cpu_usage_stat index)
+{
+       p->stimescaled += cputime_to_nsecs(scaled);
+       account_system_index_time(p, cputime_to_nsecs(cputime), index);
+}
+
 /*
  * Update process times based on virtual cpu times stored by entry.S
  * to the lowcore fields user_timer, system_timer & steal_clock.
  */
 static int do_account_vtime(struct task_struct *tsk)
 {
-       u64 timer, clock, user, system, steal;
-       u64 user_scaled, system_scaled;
+       u64 timer, clock, user, guest, system, hardirq, softirq, steal;
 
        timer = S390_lowcore.last_update_timer;
        clock = S390_lowcore.last_update_clock;
@@ -110,53 +137,76 @@ static int do_account_vtime(struct task_struct *tsk)
 #endif
                : "=m" (S390_lowcore.last_update_timer),
                  "=m" (S390_lowcore.last_update_clock));
-       S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
-       S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
+       clock = S390_lowcore.last_update_clock - clock;
+       timer -= S390_lowcore.last_update_timer;
+
+       if (hardirq_count())
+               S390_lowcore.hardirq_timer += timer;
+       else
+               S390_lowcore.system_timer += timer;
 
        /* Update MT utilization calculation */
        if (smp_cpu_mtid &&
            time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
                update_mt_scaling();
 
-       user = S390_lowcore.user_timer - tsk->thread.user_timer;
-       S390_lowcore.steal_timer -= user;
-       tsk->thread.user_timer = S390_lowcore.user_timer;
-
-       system = S390_lowcore.system_timer - tsk->thread.system_timer;
-       S390_lowcore.steal_timer -= system;
-       tsk->thread.system_timer = S390_lowcore.system_timer;
-
-       user_scaled = user;
-       system_scaled = system;
-       /* Do MT utilization scaling */
-       if (smp_cpu_mtid) {
-               u64 mult = __this_cpu_read(mt_scaling_mult);
-               u64 div = __this_cpu_read(mt_scaling_div);
+       /* Calculate cputime delta */
+       user = update_tsk_timer(&tsk->thread.user_timer,
+                               READ_ONCE(S390_lowcore.user_timer));
+       guest = update_tsk_timer(&tsk->thread.guest_timer,
+                                READ_ONCE(S390_lowcore.guest_timer));
+       system = update_tsk_timer(&tsk->thread.system_timer,
+                                 READ_ONCE(S390_lowcore.system_timer));
+       hardirq = update_tsk_timer(&tsk->thread.hardirq_timer,
+                                  READ_ONCE(S390_lowcore.hardirq_timer));
+       softirq = update_tsk_timer(&tsk->thread.softirq_timer,
+                                  READ_ONCE(S390_lowcore.softirq_timer));
+       S390_lowcore.steal_timer +=
+               clock - user - guest - system - hardirq - softirq;
+
+       /* Push account value */
+       if (user) {
+               account_user_time(tsk, cputime_to_nsecs(user));
+               tsk->utimescaled += cputime_to_nsecs(scale_vtime(user));
+       }
 
-               user_scaled = (user_scaled * mult) / div;
-               system_scaled = (system_scaled * mult) / div;
+       if (guest) {
+               account_guest_time(tsk, cputime_to_nsecs(guest));
+               tsk->utimescaled += cputime_to_nsecs(scale_vtime(guest));
        }
-       account_user_time(tsk, user);
-       tsk->utimescaled += user_scaled;
-       account_system_time(tsk, 0, system);
-       tsk->stimescaled += system_scaled;
+
+       if (system)
+               account_system_index_scaled(tsk, system, scale_vtime(system),
+                                           CPUTIME_SYSTEM);
+       if (hardirq)
+               account_system_index_scaled(tsk, hardirq, scale_vtime(hardirq),
+                                           CPUTIME_IRQ);
+       if (softirq)
+               account_system_index_scaled(tsk, softirq, scale_vtime(softirq),
+                                           CPUTIME_SOFTIRQ);
 
        steal = S390_lowcore.steal_timer;
        if ((s64) steal > 0) {
                S390_lowcore.steal_timer = 0;
-               account_steal_time(steal);
+               account_steal_time(cputime_to_nsecs(steal));
        }
 
-       return virt_timer_forward(user + system);
+       return virt_timer_forward(user + guest + system + hardirq + softirq);
 }
 
 void vtime_task_switch(struct task_struct *prev)
 {
        do_account_vtime(prev);
        prev->thread.user_timer = S390_lowcore.user_timer;
+       prev->thread.guest_timer = S390_lowcore.guest_timer;
        prev->thread.system_timer = S390_lowcore.system_timer;
+       prev->thread.hardirq_timer = S390_lowcore.hardirq_timer;
+       prev->thread.softirq_timer = S390_lowcore.softirq_timer;
        S390_lowcore.user_timer = current->thread.user_timer;
+       S390_lowcore.guest_timer = current->thread.guest_timer;
        S390_lowcore.system_timer = current->thread.system_timer;
+       S390_lowcore.hardirq_timer = current->thread.hardirq_timer;
+       S390_lowcore.softirq_timer = current->thread.softirq_timer;
 }
 
 /*
@@ -164,7 +214,7 @@ void vtime_task_switch(struct task_struct *prev)
  * accounting system time in order to correctly compute
  * the stolen time accounting.
  */
-void vtime_account_user(struct task_struct *tsk)
+void vtime_flush(struct task_struct *tsk)
 {
        if (do_account_vtime(tsk))
                virt_timer_expire();
@@ -176,32 +226,22 @@ void vtime_account_user(struct task_struct *tsk)
  */
 void vtime_account_irq_enter(struct task_struct *tsk)
 {
-       u64 timer, system, system_scaled;
+       u64 timer;
 
        timer = S390_lowcore.last_update_timer;
        S390_lowcore.last_update_timer = get_vtimer();
-       S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
-
-       /* Update MT utilization calculation */
-       if (smp_cpu_mtid &&
-           time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
-               update_mt_scaling();
-
-       system = S390_lowcore.system_timer - tsk->thread.system_timer;
-       S390_lowcore.steal_timer -= system;
-       tsk->thread.system_timer = S390_lowcore.system_timer;
-       system_scaled = system;
-       /* Do MT utilization scaling */
-       if (smp_cpu_mtid) {
-               u64 mult = __this_cpu_read(mt_scaling_mult);
-               u64 div = __this_cpu_read(mt_scaling_div);
-
-               system_scaled = (system_scaled * mult) / div;
-       }
-       account_system_time(tsk, 0, system);
-       tsk->stimescaled += system_scaled;
-
-       virt_timer_forward(system);
+       timer -= S390_lowcore.last_update_timer;
+
+       if ((tsk->flags & PF_VCPU) && (irq_count() == 0))
+               S390_lowcore.guest_timer += timer;
+       else if (hardirq_count())
+               S390_lowcore.hardirq_timer += timer;
+       else if (in_serving_softirq())
+               S390_lowcore.softirq_timer += timer;
+       else
+               S390_lowcore.system_timer += timer;
+
+       virt_timer_forward(timer);
 }
 EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
 
index 7a1897c51c5495f3f2b13d86ba8f6344e2234788..d56ef26d46816b834068609ceb940ce01901d731 100644 (file)
@@ -202,7 +202,7 @@ static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
        return pgste;
 }
 
-static inline void ptep_xchg_commit(struct mm_struct *mm,
+static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
                                    unsigned long addr, pte_t *ptep,
                                    pgste_t pgste, pte_t old, pte_t new)
 {
@@ -220,6 +220,7 @@ static inline void ptep_xchg_commit(struct mm_struct *mm,
        } else {
                *ptep = new;
        }
+       return old;
 }
 
 pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
@@ -231,7 +232,7 @@ pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
        preempt_disable();
        pgste = ptep_xchg_start(mm, addr, ptep);
        old = ptep_flush_direct(mm, addr, ptep);
-       ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+       old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
        preempt_enable();
        return old;
 }
@@ -246,7 +247,7 @@ pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
        preempt_disable();
        pgste = ptep_xchg_start(mm, addr, ptep);
        old = ptep_flush_lazy(mm, addr, ptep);
-       ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+       old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
        preempt_enable();
        return old;
 }
index a05218ff3fe465b6e4812d7655360dc1b495a519..51970bb6c4fedf8ae0d96f1c25cbd4d8cb69b858 100644 (file)
@@ -4,7 +4,6 @@ header-y +=
 
 generic-y += barrier.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += irq_work.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
index 9bdcf72ec06adea46e83da8952d0d70be19f61be..2fce54d9c388496e8c3de442e6c3ddb835acddaf 100644 (file)
@@ -25,7 +25,7 @@ CONFIG_SH_SH7785LCR=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_STAT=y
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_SH_CPU_FREQ=y
 CONFIG_HEARTBEAT=y
index 751c3373a92c8882331fa8dfa6e567c4c4be7584..cf2a75063b53c679e19b625015f2137ede467a19 100644 (file)
@@ -1,7 +1,6 @@
 
 generic-y += bitsperlong.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += current.h
 generic-y += delay.h
 generic-y += div64.h
index 0569bfac4afbb68d1fa046c6201ad1cb17d9a841..e9e837bc3158c93c426c7f2a1716f0abea9e640d 100644 (file)
@@ -2,7 +2,6 @@
 
 
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += div64.h
 generic-y += emergency-restart.h
 generic-y += exec.h
index b84be675e507857e27766b6339e438270aea0ebe..d0317993e9476fd1178a693638d9ede23860171b 100644 (file)
@@ -35,15 +35,15 @@ void __tsb_context_switch(unsigned long pgd_pa,
 static inline void tsb_context_switch(struct mm_struct *mm)
 {
        __tsb_context_switch(__pa(mm->pgd),
-                            &mm->context.tsb_block[0],
+                            &mm->context.tsb_block[MM_TSB_BASE],
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-                            (mm->context.tsb_block[1].tsb ?
-                             &mm->context.tsb_block[1] :
+                            (mm->context.tsb_block[MM_TSB_HUGE].tsb ?
+                             &mm->context.tsb_block[MM_TSB_HUGE] :
                              NULL)
 #else
                             NULL
 #endif
-                            , __pa(&mm->context.tsb_descr[0]));
+                            , __pa(&mm->context.tsb_descr[MM_TSB_BASE]));
 }
 
 void tsb_grow(struct mm_struct *mm,
index 3bebf395252cc63ee3b39996f8a0d0431e7faf37..4d0248aa0928695597161d93f325a49311b43e2c 100644 (file)
@@ -1021,7 +1021,7 @@ static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
        unsigned long order = get_order(size);
        unsigned long p;
 
-       p = __get_free_pages(GFP_KERNEL, order);
+       p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!p) {
                prom_printf("SUN4V: Error, cannot allocate queue.\n");
                prom_halt();
index c59af546f522999342361a5babcb64a1dfab3ccd..3caed40235898698751ff0cf3d6ca7d98dc83da6 100644 (file)
@@ -43,8 +43,8 @@ static const char poweroff_msg[32] __attribute__((aligned(32))) =
        "Linux powering off";
 static const char rebooting_msg[32] __attribute__((aligned(32))) =
        "Linux rebooting";
-static const char panicing_msg[32] __attribute__((aligned(32))) =
-       "Linux panicing";
+static const char panicking_msg[32] __attribute__((aligned(32))) =
+       "Linux panicking";
 
 static int sstate_reboot_call(struct notifier_block *np, unsigned long type, void *_unused)
 {
@@ -76,7 +76,7 @@ static struct notifier_block sstate_reboot_notifier = {
 
 static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr)
 {
-       do_set_sstate(HV_SOFT_STATE_TRANSITION, panicing_msg);
+       do_set_sstate(HV_SOFT_STATE_TRANSITION, panicking_msg);
 
        return NOTIFY_DONE;
 }
index 4bc10e44d1ca32a0acdf69b8492e128e7ef0e600..dfc97a47c9a08a330f31040fe120030ebe8cc098 100644 (file)
@@ -2051,6 +2051,73 @@ void sun4v_resum_overflow(struct pt_regs *regs)
        atomic_inc(&sun4v_resum_oflow_cnt);
 }
 
+/* Given a set of registers, get the virtual addressi that was being accessed
+ * by the faulting instructions at tpc.
+ */
+static unsigned long sun4v_get_vaddr(struct pt_regs *regs)
+{
+       unsigned int insn;
+
+       if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) {
+               return compute_effective_address(regs, insn,
+                                                (insn >> 25) & 0x1f);
+       }
+       return 0;
+}
+
+/* Attempt to handle non-resumable errors generated from userspace.
+ * Returns true if the signal was handled, false otherwise.
+ */
+bool sun4v_nonresum_error_user_handled(struct pt_regs *regs,
+                                 struct sun4v_error_entry *ent) {
+
+       unsigned int attrs = ent->err_attrs;
+
+       if (attrs & SUN4V_ERR_ATTRS_MEMORY) {
+               unsigned long addr = ent->err_raddr;
+               siginfo_t info;
+
+               if (addr == ~(u64)0) {
+                       /* This seems highly unlikely to ever occur */
+                       pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory error detected in unknown location!\n");
+               } else {
+                       unsigned long page_cnt = DIV_ROUND_UP(ent->err_size,
+                                                             PAGE_SIZE);
+
+                       /* Break the unfortunate news. */
+                       pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory failed at %016lX\n",
+                                addr);
+                       pr_emerg("SUN4V NON-RECOVERABLE ERROR:   Claiming %lu ages.\n",
+                                page_cnt);
+
+                       while (page_cnt-- > 0) {
+                               if (pfn_valid(addr >> PAGE_SHIFT))
+                                       get_page(pfn_to_page(addr >> PAGE_SHIFT));
+                               addr += PAGE_SIZE;
+                       }
+               }
+               info.si_signo = SIGKILL;
+               info.si_errno = 0;
+               info.si_trapno = 0;
+               force_sig_info(info.si_signo, &info, current);
+
+               return true;
+       }
+       if (attrs & SUN4V_ERR_ATTRS_PIO) {
+               siginfo_t info;
+
+               info.si_signo = SIGBUS;
+               info.si_code = BUS_ADRERR;
+               info.si_addr = (void __user *)sun4v_get_vaddr(regs);
+               force_sig_info(info.si_signo, &info, current);
+
+               return true;
+       }
+
+       /* Default to doing nothing */
+       return false;
+}
+
 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
  * Log the event, clear the first word of the entry, and die.
  */
@@ -2075,6 +2142,12 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
 
        put_cpu();
 
+       if (!(regs->tstate & TSTATE_PRIV) &&
+           sun4v_nonresum_error_user_handled(regs, &local_copy)) {
+               /* DON'T PANIC: This userspace error was handled. */
+               return;
+       }
+
 #ifdef CONFIG_PCI
        /* Check for the special PCI poke sequence. */
        if (pci_poke_in_progress && pci_poke_cpu == cpu) {
index 2d1f5638974cded6c3311945e8741d047bf2d4e9..aa48b6eaff2d605f80c88a59a04b7313e9fb6dc5 100644 (file)
@@ -4,8 +4,6 @@ header-y += ../arch/
 generic-y += bug.h
 generic-y += bugs.h
 generic-y += clkdev.h
-generic-y += cputime.h
-generic-y += div64.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
diff --git a/arch/tile/include/asm/div64.h b/arch/tile/include/asm/div64.h
new file mode 100644 (file)
index 0000000..9f765cd
--- /dev/null
@@ -0,0 +1,16 @@
+#ifndef _ASM_TILE_DIV64_H
+#define _ASM_TILE_DIV64_H
+
+#include <linux/types.h>
+
+#ifdef __tilegx__
+static inline u64 mul_u32_u32(u32 a, u32 b)
+{
+       return __insn_mul_lu_lu(a, b);
+}
+#define mul_u32_u32 mul_u32_u32
+#endif
+
+#include <asm-generic/div64.h>
+
+#endif /* _ASM_TILE_DIV64_H */
index d89b7011667cb4f1a6f3ad55238d2c815e229c41..e279572824b15e07616b98215fb51c1fa65f4c9f 100644 (file)
@@ -111,7 +111,7 @@ static int tile_gpr_set(struct task_struct *target,
                          const void *kbuf, const void __user *ubuf)
 {
        int ret;
-       struct pt_regs regs;
+       struct pt_regs regs = *task_pt_regs(target);
 
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0,
                                 sizeof(regs));
index 05523f14d7b2b3de379e4f837d6567c2234fa8f4..57f03050c8505b00353a3902f014733de651db94 100644 (file)
@@ -76,7 +76,7 @@ static ssize_t rng_dev_read (struct file *filp, char __user *buf, size_t size,
                        add_sigio_fd(random_fd);
 
                        add_wait_queue(&host_read_wait, &wait);
-                       set_task_state(current, TASK_INTERRUPTIBLE);
+                       set_current_state(TASK_INTERRUPTIBLE);
 
                        schedule();
                        remove_wait_queue(&host_read_wait, &wait);
index 052f7f6d0551c184bb05ff0b63d26705a6385f44..90c281cd7e1dbcf18c46c6b68f65bead984352fc 100644 (file)
@@ -1,7 +1,6 @@
 generic-y += barrier.h
 generic-y += bug.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += current.h
 generic-y += delay.h
 generic-y += device.h
index 256c45b3ae343c983e667b01404d8fb3e3667b4a..5d51ade89f4c5f8ca1875dbf3505853f007e8109 100644 (file)
@@ -4,7 +4,6 @@ generic-y += auxvec.h
 generic-y += bitsperlong.h
 generic-y += bugs.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += current.h
 generic-y += device.h
 generic-y += div64.h
index e487493bbd47f0f84caed99f6c952bf6b3062413..f8fbfc5a98babdc2208932970e8a98099b577950 100644 (file)
@@ -1070,7 +1070,7 @@ config X86_MCE_THRESHOLD
        def_bool y
 
 config X86_MCE_INJECT
-       depends on X86_MCE
+       depends on X86_MCE && X86_LOCAL_APIC
        tristate "Machine check injector support"
        ---help---
          Provide support for injecting machine checks for testing purposes.
@@ -1994,10 +1994,6 @@ config RANDOMIZE_BASE
          theoretically possible, but the implementations are further
          limited due to memory layouts.
 
-         If CONFIG_HIBERNATE is also enabled, KASLR is disabled at boot
-         time. To enable it, boot with "kaslr" on the kernel command
-         line (which will also disable hibernation).
-
          If unsure, say N.
 
 # Relocation on x86 needs some additional build support
index 67eec55093a5dc97634aea899c4be86ff9f18f00..783099f2ac72bc45c7196f71f344c81728521d95 100644 (file)
@@ -120,14 +120,6 @@ config DEBUG_SET_MODULE_RONX
          against certain classes of kernel exploits.
          If in doubt, say "N".
 
-config DEBUG_NX_TEST
-       tristate "Testcase for the NX non-executable stack feature"
-       depends on DEBUG_KERNEL && m
-       ---help---
-         This option enables a testcase for the CPU NX capability
-         and the software setup of this feature.
-         If in doubt, say "N"
-
 config DOUBLEFAULT
        default y
        bool "Enable doublefault exception handler" if EXPERT
index e5612f3e3b57ca7611f1e348c7f317f1416dd54a..9b42b6d1e902f20f50636b152b7b4236578c5589 100644 (file)
@@ -333,6 +333,7 @@ size_t strnlen(const char *s, size_t maxlen);
 unsigned int atou(const char *s);
 unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base);
 size_t strlen(const char *s);
+char *strchr(const char *s, int c);
 
 /* tty.c */
 void puts(const char *);
index ff01c8fc76f74223c9254f0aae1317de16834ced..801c7a158e55938a830e709578be069f3b649c19 100644 (file)
@@ -32,160 +32,13 @@ static void setup_boot_services##bits(struct efi_config *c)                \
                                                                        \
        table = (typeof(table))sys_table;                               \
                                                                        \
+       c->runtime_services = table->runtime;                           \
        c->boot_services = table->boottime;                             \
        c->text_output = table->con_out;                                \
 }
 BOOT_SERVICES(32);
 BOOT_SERVICES(64);
 
-void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
-
-static efi_status_t
-__file_size32(void *__fh, efi_char16_t *filename_16,
-             void **handle, u64 *file_sz)
-{
-       efi_file_handle_32_t *h, *fh = __fh;
-       efi_file_info_t *info;
-       efi_status_t status;
-       efi_guid_t info_guid = EFI_FILE_INFO_ID;
-       u32 info_sz;
-
-       status = efi_early->call((unsigned long)fh->open, fh, &h, filename_16,
-                                EFI_FILE_MODE_READ, (u64)0);
-       if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "Failed to open file: ");
-               efi_char16_printk(sys_table, filename_16);
-               efi_printk(sys_table, "\n");
-               return status;
-       }
-
-       *handle = h;
-
-       info_sz = 0;
-       status = efi_early->call((unsigned long)h->get_info, h, &info_guid,
-                                &info_sz, NULL);
-       if (status != EFI_BUFFER_TOO_SMALL) {
-               efi_printk(sys_table, "Failed to get file info size\n");
-               return status;
-       }
-
-grow:
-       status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
-                               info_sz, (void **)&info);
-       if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "Failed to alloc mem for file info\n");
-               return status;
-       }
-
-       status = efi_early->call((unsigned long)h->get_info, h, &info_guid,
-                                &info_sz, info);
-       if (status == EFI_BUFFER_TOO_SMALL) {
-               efi_call_early(free_pool, info);
-               goto grow;
-       }
-
-       *file_sz = info->file_size;
-       efi_call_early(free_pool, info);
-
-       if (status != EFI_SUCCESS)
-               efi_printk(sys_table, "Failed to get initrd info\n");
-
-       return status;
-}
-
-static efi_status_t
-__file_size64(void *__fh, efi_char16_t *filename_16,
-             void **handle, u64 *file_sz)
-{
-       efi_file_handle_64_t *h, *fh = __fh;
-       efi_file_info_t *info;
-       efi_status_t status;
-       efi_guid_t info_guid = EFI_FILE_INFO_ID;
-       u64 info_sz;
-
-       status = efi_early->call((unsigned long)fh->open, fh, &h, filename_16,
-                                EFI_FILE_MODE_READ, (u64)0);
-       if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "Failed to open file: ");
-               efi_char16_printk(sys_table, filename_16);
-               efi_printk(sys_table, "\n");
-               return status;
-       }
-
-       *handle = h;
-
-       info_sz = 0;
-       status = efi_early->call((unsigned long)h->get_info, h, &info_guid,
-                                &info_sz, NULL);
-       if (status != EFI_BUFFER_TOO_SMALL) {
-               efi_printk(sys_table, "Failed to get file info size\n");
-               return status;
-       }
-
-grow:
-       status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
-                               info_sz, (void **)&info);
-       if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "Failed to alloc mem for file info\n");
-               return status;
-       }
-
-       status = efi_early->call((unsigned long)h->get_info, h, &info_guid,
-                                &info_sz, info);
-       if (status == EFI_BUFFER_TOO_SMALL) {
-               efi_call_early(free_pool, info);
-               goto grow;
-       }
-
-       *file_sz = info->file_size;
-       efi_call_early(free_pool, info);
-
-       if (status != EFI_SUCCESS)
-               efi_printk(sys_table, "Failed to get initrd info\n");
-
-       return status;
-}
-efi_status_t
-efi_file_size(efi_system_table_t *sys_table, void *__fh,
-             efi_char16_t *filename_16, void **handle, u64 *file_sz)
-{
-       if (efi_early->is64)
-               return __file_size64(__fh, filename_16, handle, file_sz);
-
-       return __file_size32(__fh, filename_16, handle, file_sz);
-}
-
-efi_status_t
-efi_file_read(void *handle, unsigned long *size, void *addr)
-{
-       unsigned long func;
-
-       if (efi_early->is64) {
-               efi_file_handle_64_t *fh = handle;
-
-               func = (unsigned long)fh->read;
-               return efi_early->call(func, handle, size, addr);
-       } else {
-               efi_file_handle_32_t *fh = handle;
-
-               func = (unsigned long)fh->read;
-               return efi_early->call(func, handle, size, addr);
-       }
-}
-
-efi_status_t efi_file_close(void *handle)
-{
-       if (efi_early->is64) {
-               efi_file_handle_64_t *fh = handle;
-
-               return efi_early->call((unsigned long)fh->close, handle);
-       } else {
-               efi_file_handle_32_t *fh = handle;
-
-               return efi_early->call((unsigned long)fh->close, handle);
-       }
-}
-
 static inline efi_status_t __open_volume32(void *__image, void **__fh)
 {
        efi_file_io_interface_t *io;
@@ -249,30 +102,8 @@ efi_open_volume(efi_system_table_t *sys_table, void *__image, void **__fh)
 
 void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str)
 {
-       unsigned long output_string;
-       size_t offset;
-
-       if (efi_early->is64) {
-               struct efi_simple_text_output_protocol_64 *out;
-               u64 *func;
-
-               offset = offsetof(typeof(*out), output_string);
-               output_string = efi_early->text_output + offset;
-               out = (typeof(out))(unsigned long)efi_early->text_output;
-               func = (u64 *)output_string;
-
-               efi_early->call(*func, out, str);
-       } else {
-               struct efi_simple_text_output_protocol_32 *out;
-               u32 *func;
-
-               offset = offsetof(typeof(*out), output_string);
-               output_string = efi_early->text_output + offset;
-               out = (typeof(out))(unsigned long)efi_early->text_output;
-               func = (u32 *)output_string;
-
-               efi_early->call(*func, out, str);
-       }
+       efi_call_proto(efi_simple_text_output_protocol, output_string,
+                      efi_early->text_output, str);
 }
 
 static efi_status_t
@@ -1157,6 +988,13 @@ struct boot_params *efi_main(struct efi_config *c,
        else
                setup_boot_services32(efi_early);
 
+       /*
+        * If the boot loader gave us a value for secure_boot then we use that,
+        * otherwise we ask the BIOS.
+        */
+       if (boot_params->secure_boot == efi_secureboot_mode_unset)
+               boot_params->secure_boot = efi_get_secureboot(sys_table);
+
        setup_graphics(boot_params);
 
        setup_efi_pci(boot_params);
index fd0b6a272dd5bf252b4aad12be6b961789cc2404..d85b9625e836d5429e4eff13104e0b01f338f4d0 100644 (file)
@@ -82,7 +82,7 @@ ENTRY(efi_pe_entry)
 
        /* Relocate efi_config->call() */
        leal    efi32_config(%esi), %eax
-       add     %esi, 32(%eax)
+       add     %esi, 40(%eax)
        pushl   %eax
 
        call    make_boot_params
@@ -108,7 +108,7 @@ ENTRY(efi32_stub_entry)
 
        /* Relocate efi_config->call() */
        leal    efi32_config(%esi), %eax
-       add     %esi, 32(%eax)
+       add     %esi, 40(%eax)
        pushl   %eax
 2:
        call    efi_main
@@ -264,7 +264,7 @@ relocated:
 #ifdef CONFIG_EFI_STUB
        .data
 efi32_config:
-       .fill 4,8,0
+       .fill 5,8,0
        .long efi_call_phys
        .long 0
        .byte 0
index 4d85e600db78292b46fc2ed2676b2cb171b708e2..d2ae1f821e0c6b6d85452841160fbaf1c182a743 100644 (file)
@@ -264,7 +264,7 @@ ENTRY(efi_pe_entry)
        /*
         * Relocate efi_config->call().
         */
-       addq    %rbp, efi64_config+32(%rip)
+       addq    %rbp, efi64_config+40(%rip)
 
        movq    %rax, %rdi
        call    make_boot_params
@@ -284,7 +284,7 @@ handover_entry:
         * Relocate efi_config->call().
         */
        movq    efi_config(%rip), %rax
-       addq    %rbp, 32(%rax)
+       addq    %rbp, 40(%rax)
 2:
        movq    efi_config(%rip), %rdi
        call    efi_main
@@ -456,14 +456,14 @@ efi_config:
 #ifdef CONFIG_EFI_MIXED
        .global efi32_config
 efi32_config:
-       .fill   4,8,0
+       .fill   5,8,0
        .quad   efi64_thunk
        .byte   0
 #endif
 
        .global efi64_config
 efi64_config:
-       .fill   4,8,0
+       .fill   5,8,0
        .quad   efi_call
        .byte   1
 #endif /* CONFIG_EFI_STUB */
index a66854d99ee1a34951faa2d426c8579bf4849bbc..8b7c9e75edcbf5a397c968d441a34369553f7870 100644 (file)
@@ -11,6 +11,7 @@
  */
 #include "misc.h"
 #include "error.h"
+#include "../boot.h"
 
 #include <generated/compile.h>
 #include <linux/module.h>
@@ -52,15 +53,22 @@ static unsigned long get_boot_seed(void)
 #include "../../lib/kaslr.c"
 
 struct mem_vector {
-       unsigned long start;
-       unsigned long size;
+       unsigned long long start;
+       unsigned long long size;
 };
 
+/* Only supporting at most 4 unusable memmap regions with kaslr */
+#define MAX_MEMMAP_REGIONS     4
+
+static bool memmap_too_large;
+
 enum mem_avoid_index {
        MEM_AVOID_ZO_RANGE = 0,
        MEM_AVOID_INITRD,
        MEM_AVOID_CMDLINE,
        MEM_AVOID_BOOTPARAMS,
+       MEM_AVOID_MEMMAP_BEGIN,
+       MEM_AVOID_MEMMAP_END = MEM_AVOID_MEMMAP_BEGIN + MAX_MEMMAP_REGIONS - 1,
        MEM_AVOID_MAX,
 };
 
@@ -77,6 +85,123 @@ static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
        return true;
 }
 
+/**
+ *     _memparse - Parse a string with mem suffixes into a number
+ *     @ptr: Where parse begins
+ *     @retptr: (output) Optional pointer to next char after parse completes
+ *
+ *     Parses a string into a number.  The number stored at @ptr is
+ *     potentially suffixed with K, M, G, T, P, E.
+ */
+static unsigned long long _memparse(const char *ptr, char **retptr)
+{
+       char *endptr;   /* Local pointer to end of parsed string */
+
+       unsigned long long ret = simple_strtoull(ptr, &endptr, 0);
+
+       switch (*endptr) {
+       case 'E':
+       case 'e':
+               ret <<= 10;
+       case 'P':
+       case 'p':
+               ret <<= 10;
+       case 'T':
+       case 't':
+               ret <<= 10;
+       case 'G':
+       case 'g':
+               ret <<= 10;
+       case 'M':
+       case 'm':
+               ret <<= 10;
+       case 'K':
+       case 'k':
+               ret <<= 10;
+               endptr++;
+       default:
+               break;
+       }
+
+       if (retptr)
+               *retptr = endptr;
+
+       return ret;
+}
+
+static int
+parse_memmap(char *p, unsigned long long *start, unsigned long long *size)
+{
+       char *oldp;
+
+       if (!p)
+               return -EINVAL;
+
+       /* We don't care about this option here */
+       if (!strncmp(p, "exactmap", 8))
+               return -EINVAL;
+
+       oldp = p;
+       *size = _memparse(p, &p);
+       if (p == oldp)
+               return -EINVAL;
+
+       switch (*p) {
+       case '@':
+               /* Skip this region, usable */
+               *start = 0;
+               *size = 0;
+               return 0;
+       case '#':
+       case '$':
+       case '!':
+               *start = _memparse(p + 1, &p);
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
+static void mem_avoid_memmap(void)
+{
+       char arg[128];
+       int rc;
+       int i;
+       char *str;
+
+       /* See if we have any memmap areas */
+       rc = cmdline_find_option("memmap", arg, sizeof(arg));
+       if (rc <= 0)
+               return;
+
+       i = 0;
+       str = arg;
+       while (str && (i < MAX_MEMMAP_REGIONS)) {
+               int rc;
+               unsigned long long start, size;
+               char *k = strchr(str, ',');
+
+               if (k)
+                       *k++ = 0;
+
+               rc = parse_memmap(str, &start, &size);
+               if (rc < 0)
+                       break;
+               str = k;
+               /* A usable region that should not be skipped */
+               if (size == 0)
+                       continue;
+
+               mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].start = start;
+               mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].size = size;
+               i++;
+       }
+
+       /* More than 4 memmaps, fail kaslr */
+       if ((i >= MAX_MEMMAP_REGIONS) && str)
+               memmap_too_large = true;
+}
+
 /*
  * In theory, KASLR can put the kernel anywhere in the range of [16M, 64T).
  * The mem_avoid array is used to store the ranges that need to be avoided
@@ -197,6 +322,9 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
 
        /* We don't need to set a mapping for setup_data. */
 
+       /* Mark the memmap regions we need to avoid */
+       mem_avoid_memmap();
+
 #ifdef CONFIG_X86_VERBOSE_BOOTUP
        /* Make sure video RAM can be used. */
        add_identity_map(0, PMD_SIZE);
@@ -379,6 +507,12 @@ static unsigned long find_random_phys_addr(unsigned long minimum,
        int i;
        unsigned long addr;
 
+       /* Check if we had too many memmaps. */
+       if (memmap_too_large) {
+               debug_putstr("Aborted e820 scan (more than 4 memmap= args)!\n");
+               return 0;
+       }
+
        /* Make sure minimum is aligned. */
        minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
 
@@ -456,7 +590,7 @@ void choose_random_location(unsigned long input,
        /* Walk e820 and find a random address. */
        random_addr = find_random_phys_addr(min_addr, output_size);
        if (!random_addr) {
-               warn("KASLR disabled: could not find suitable E820 region!");
+               warn("Physical KASLR disabled: no suitable memory region!");
        } else {
                /* Update the new physical address location. */
                if (*output != random_addr) {
index 9e240fcba784b085bd55e77dc79c08d21d1a23e2..5457b02fc05077b69ada23565bb54026aca09c0a 100644 (file)
@@ -156,3 +156,16 @@ char *strstr(const char *s1, const char *s2)
        }
        return NULL;
 }
+
+/**
+ * strchr - Find the first occurrence of the character c in the string s.
+ * @s: the string to be searched
+ * @c: the character to search for
+ */
+char *strchr(const char *s, int c)
+{
+       while (*s != (char)c)
+               if (*s++ == '\0')
+                       return NULL;
+       return (char *)s;
+}
index 6ef688a1ef3e0f022032e5317662b9011c8f74c4..7ff1b0c86a8e5a630844c3ade200b592cbad413d 100644 (file)
@@ -1085,9 +1085,9 @@ static void aesni_free_simds(void)
                    aesni_simd_skciphers[i]; i++)
                simd_skcipher_free(aesni_simd_skciphers[i]);
 
-       for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2) &&
-                   aesni_simd_skciphers2[i].simd; i++)
-               simd_skcipher_free(aesni_simd_skciphers2[i].simd);
+       for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++)
+               if (aesni_simd_skciphers2[i].simd)
+                       simd_skcipher_free(aesni_simd_skciphers2[i].simd);
 }
 
 static int __init aesni_init(void)
@@ -1168,7 +1168,7 @@ static int __init aesni_init(void)
                simd = simd_skcipher_create_compat(algname, drvname, basename);
                err = PTR_ERR(simd);
                if (IS_ERR(simd))
-                       goto unregister_simds;
+                       continue;
 
                aesni_simd_skciphers2[i].simd = simd;
        }
index 1d392c39fe560a782ae7fd71296ee0cc64f47fe5..b8ccdb5c92442c6e5a05277e5a2839f956d198c8 100644 (file)
@@ -1,11 +1,4 @@
-obj-y                  += core.o
-
-obj-$(CONFIG_CPU_SUP_AMD)               += amd/core.o amd/uncore.o
-obj-$(CONFIG_PERF_EVENTS_AMD_POWER)    += amd/power.o
-obj-$(CONFIG_X86_LOCAL_APIC)            += amd/ibs.o msr.o
-ifdef CONFIG_AMD_IOMMU
-obj-$(CONFIG_CPU_SUP_AMD)               += amd/iommu.o
-endif
-
-obj-$(CONFIG_CPU_SUP_INTEL)            += msr.o
+obj-y                                  += core.o
+obj-y                                  += amd/
+obj-$(CONFIG_X86_LOCAL_APIC)            += msr.o
 obj-$(CONFIG_CPU_SUP_INTEL)            += intel/
diff --git a/arch/x86/events/amd/Makefile b/arch/x86/events/amd/Makefile
new file mode 100644 (file)
index 0000000..b1da46f
--- /dev/null
@@ -0,0 +1,7 @@
+obj-$(CONFIG_CPU_SUP_AMD)              += core.o uncore.o
+obj-$(CONFIG_PERF_EVENTS_AMD_POWER)    += power.o
+obj-$(CONFIG_X86_LOCAL_APIC)           += ibs.o
+ifdef CONFIG_AMD_IOMMU
+obj-$(CONFIG_CPU_SUP_AMD)              += iommu.o
+endif
+
index a0b1bdb3ad421ed93746718717d47eb84904e86f..4d1f7f2d9aff6f3383ab09de7119d3812d8ed3fe 100644 (file)
 
 #define NUM_COUNTERS_NB                4
 #define NUM_COUNTERS_L2                4
-#define MAX_COUNTERS           NUM_COUNTERS_NB
+#define NUM_COUNTERS_L3                6
+#define MAX_COUNTERS           6
 
 #define RDPMC_BASE_NB          6
-#define RDPMC_BASE_L         10
+#define RDPMC_BASE_LLC         10
 
 #define COUNTER_SHIFT          16
 
+static int num_counters_llc;
+static int num_counters_nb;
+
 static HLIST_HEAD(uncore_unused_list);
 
 struct amd_uncore {
@@ -45,30 +49,30 @@ struct amd_uncore {
 };
 
 static struct amd_uncore * __percpu *amd_uncore_nb;
-static struct amd_uncore * __percpu *amd_uncore_l2;
+static struct amd_uncore * __percpu *amd_uncore_llc;
 
 static struct pmu amd_nb_pmu;
-static struct pmu amd_l2_pmu;
+static struct pmu amd_llc_pmu;
 
 static cpumask_t amd_nb_active_mask;
-static cpumask_t amd_l2_active_mask;
+static cpumask_t amd_llc_active_mask;
 
 static bool is_nb_event(struct perf_event *event)
 {
        return event->pmu->type == amd_nb_pmu.type;
 }
 
-static bool is_l2_event(struct perf_event *event)
+static bool is_llc_event(struct perf_event *event)
 {
-       return event->pmu->type == amd_l2_pmu.type;
+       return event->pmu->type == amd_llc_pmu.type;
 }
 
 static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
 {
        if (is_nb_event(event) && amd_uncore_nb)
                return *per_cpu_ptr(amd_uncore_nb, event->cpu);
-       else if (is_l2_event(event) && amd_uncore_l2)
-               return *per_cpu_ptr(amd_uncore_l2, event->cpu);
+       else if (is_llc_event(event) && amd_uncore_llc)
+               return *per_cpu_ptr(amd_uncore_llc, event->cpu);
 
        return NULL;
 }
@@ -183,16 +187,16 @@ static int amd_uncore_event_init(struct perf_event *event)
                return -ENOENT;
 
        /*
-        * NB and L2 counters (MSRs) are shared across all cores that share the
-        * same NB / L2 cache. Interrupts can be directed to a single target
-        * core, however, event counts generated by processes running on other
-        * cores cannot be masked out. So we do not support sampling and
-        * per-thread events.
+        * NB and Last level cache counters (MSRs) are shared across all cores
+        * that share the same NB / Last level cache. Interrupts can be directed
+        * to a single target core, however, event counts generated by processes
+        * running on other cores cannot be masked out. So we do not support
+        * sampling and per-thread events.
         */
        if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
                return -EINVAL;
 
-       /* NB and L2 counters do not have usr/os/guest/host bits */
+       /* NB and Last level cache counters do not have usr/os/guest/host bits */
        if (event->attr.exclude_user || event->attr.exclude_kernel ||
            event->attr.exclude_host || event->attr.exclude_guest)
                return -EINVAL;
@@ -226,8 +230,8 @@ static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
 
        if (pmu->type == amd_nb_pmu.type)
                active_mask = &amd_nb_active_mask;
-       else if (pmu->type == amd_l2_pmu.type)
-               active_mask = &amd_l2_active_mask;
+       else if (pmu->type == amd_llc_pmu.type)
+               active_mask = &amd_llc_active_mask;
        else
                return 0;
 
@@ -244,30 +248,47 @@ static struct attribute_group amd_uncore_attr_group = {
        .attrs = amd_uncore_attrs,
 };
 
-PMU_FORMAT_ATTR(event, "config:0-7,32-35");
-PMU_FORMAT_ATTR(umask, "config:8-15");
-
-static struct attribute *amd_uncore_format_attr[] = {
-       &format_attr_event.attr,
-       &format_attr_umask.attr,
-       NULL,
-};
-
-static struct attribute_group amd_uncore_format_group = {
-       .name = "format",
-       .attrs = amd_uncore_format_attr,
+/*
+ * Similar to PMU_FORMAT_ATTR but allowing for format_attr to be assigned based
+ * on family
+ */
+#define AMD_FORMAT_ATTR(_dev, _name, _format)                               \
+static ssize_t                                                              \
+_dev##_show##_name(struct device *dev,                                      \
+               struct device_attribute *attr,                               \
+               char *page)                                                  \
+{                                                                           \
+       BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                          \
+       return sprintf(page, _format "\n");                                  \
+}                                                                           \
+static struct device_attribute format_attr_##_dev##_name = __ATTR_RO(_dev);
+
+/* Used for each uncore counter type */
+#define AMD_ATTRIBUTE(_name)                                                \
+static struct attribute *amd_uncore_format_attr_##_name[] = {               \
+       &format_attr_event_##_name.attr,                                     \
+       &format_attr_umask.attr,                                             \
+       NULL,                                                                \
+};                                                                          \
+static struct attribute_group amd_uncore_format_group_##_name = {           \
+       .name = "format",                                                    \
+       .attrs = amd_uncore_format_attr_##_name,                             \
+};                                                                          \
+static const struct attribute_group *amd_uncore_attr_groups_##_name[] = {    \
+       &amd_uncore_attr_group,                                              \
+       &amd_uncore_format_group_##_name,                                    \
+       NULL,                                                                \
 };
 
-static const struct attribute_group *amd_uncore_attr_groups[] = {
-       &amd_uncore_attr_group,
-       &amd_uncore_format_group,
-       NULL,
-};
+AMD_FORMAT_ATTR(event, , "config:0-7,32-35");
+AMD_FORMAT_ATTR(umask, , "config:8-15");
+AMD_FORMAT_ATTR(event, _df, "config:0-7,32-35,59-60");
+AMD_FORMAT_ATTR(event, _l3, "config:0-7");
+AMD_ATTRIBUTE(df);
+AMD_ATTRIBUTE(l3);
 
 static struct pmu amd_nb_pmu = {
        .task_ctx_nr    = perf_invalid_context,
-       .attr_groups    = amd_uncore_attr_groups,
-       .name           = "amd_nb",
        .event_init     = amd_uncore_event_init,
        .add            = amd_uncore_add,
        .del            = amd_uncore_del,
@@ -276,10 +297,8 @@ static struct pmu amd_nb_pmu = {
        .read           = amd_uncore_read,
 };
 
-static struct pmu amd_l2_pmu = {
+static struct pmu amd_llc_pmu = {
        .task_ctx_nr    = perf_invalid_context,
-       .attr_groups    = amd_uncore_attr_groups,
-       .name           = "amd_l2",
        .event_init     = amd_uncore_event_init,
        .add            = amd_uncore_add,
        .del            = amd_uncore_del,
@@ -296,14 +315,14 @@ static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
 
 static int amd_uncore_cpu_up_prepare(unsigned int cpu)
 {
-       struct amd_uncore *uncore_nb = NULL, *uncore_l2;
+       struct amd_uncore *uncore_nb = NULL, *uncore_llc;
 
        if (amd_uncore_nb) {
                uncore_nb = amd_uncore_alloc(cpu);
                if (!uncore_nb)
                        goto fail;
                uncore_nb->cpu = cpu;
-               uncore_nb->num_counters = NUM_COUNTERS_NB;
+               uncore_nb->num_counters = num_counters_nb;
                uncore_nb->rdpmc_base = RDPMC_BASE_NB;
                uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
                uncore_nb->active_mask = &amd_nb_active_mask;
@@ -312,18 +331,18 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu)
                *per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
        }
 
-       if (amd_uncore_l2) {
-               uncore_l2 = amd_uncore_alloc(cpu);
-               if (!uncore_l2)
+       if (amd_uncore_llc) {
+               uncore_llc = amd_uncore_alloc(cpu);
+               if (!uncore_llc)
                        goto fail;
-               uncore_l2->cpu = cpu;
-               uncore_l2->num_counters = NUM_COUNTERS_L2;
-               uncore_l2->rdpmc_base = RDPMC_BASE_L2;
-               uncore_l2->msr_base = MSR_F16H_L2I_PERF_CTL;
-               uncore_l2->active_mask = &amd_l2_active_mask;
-               uncore_l2->pmu = &amd_l2_pmu;
-               uncore_l2->id = -1;
-               *per_cpu_ptr(amd_uncore_l2, cpu) = uncore_l2;
+               uncore_llc->cpu = cpu;
+               uncore_llc->num_counters = num_counters_llc;
+               uncore_llc->rdpmc_base = RDPMC_BASE_LLC;
+               uncore_llc->msr_base = MSR_F16H_L2I_PERF_CTL;
+               uncore_llc->active_mask = &amd_llc_active_mask;
+               uncore_llc->pmu = &amd_llc_pmu;
+               uncore_llc->id = -1;
+               *per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;
        }
 
        return 0;
@@ -376,17 +395,17 @@ static int amd_uncore_cpu_starting(unsigned int cpu)
                *per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
        }
 
-       if (amd_uncore_l2) {
+       if (amd_uncore_llc) {
                unsigned int apicid = cpu_data(cpu).apicid;
                unsigned int nshared;
 
-               uncore = *per_cpu_ptr(amd_uncore_l2, cpu);
+               uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
                cpuid_count(0x8000001d, 2, &eax, &ebx, &ecx, &edx);
                nshared = ((eax >> 14) & 0xfff) + 1;
                uncore->id = apicid - (apicid % nshared);
 
-               uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_l2);
-               *per_cpu_ptr(amd_uncore_l2, cpu) = uncore;
+               uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc);
+               *per_cpu_ptr(amd_uncore_llc, cpu) = uncore;
        }
 
        return 0;
@@ -419,8 +438,8 @@ static int amd_uncore_cpu_online(unsigned int cpu)
        if (amd_uncore_nb)
                uncore_online(cpu, amd_uncore_nb);
 
-       if (amd_uncore_l2)
-               uncore_online(cpu, amd_uncore_l2);
+       if (amd_uncore_llc)
+               uncore_online(cpu, amd_uncore_llc);
 
        return 0;
 }
@@ -456,8 +475,8 @@ static int amd_uncore_cpu_down_prepare(unsigned int cpu)
        if (amd_uncore_nb)
                uncore_down_prepare(cpu, amd_uncore_nb);
 
-       if (amd_uncore_l2)
-               uncore_down_prepare(cpu, amd_uncore_l2);
+       if (amd_uncore_llc)
+               uncore_down_prepare(cpu, amd_uncore_llc);
 
        return 0;
 }
@@ -479,8 +498,8 @@ static int amd_uncore_cpu_dead(unsigned int cpu)
        if (amd_uncore_nb)
                uncore_dead(cpu, amd_uncore_nb);
 
-       if (amd_uncore_l2)
-               uncore_dead(cpu, amd_uncore_l2);
+       if (amd_uncore_llc)
+               uncore_dead(cpu, amd_uncore_llc);
 
        return 0;
 }
@@ -492,6 +511,47 @@ static int __init amd_uncore_init(void)
        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
                goto fail_nodev;
 
+       switch(boot_cpu_data.x86) {
+               case 23:
+                       /* Family 17h: */
+                       num_counters_nb = NUM_COUNTERS_NB;
+                       num_counters_llc = NUM_COUNTERS_L3;
+                       /*
+                        * For Family17h, the NorthBridge counters are
+                        * re-purposed as Data Fabric counters. Also, support is
+                        * added for L3 counters. The pmus are exported based on
+                        * family as either L2 or L3 and NB or DF.
+                        */
+                       amd_nb_pmu.name = "amd_df";
+                       amd_llc_pmu.name = "amd_l3";
+                       format_attr_event_df.show = &event_show_df;
+                       format_attr_event_l3.show = &event_show_l3;
+                       break;
+               case 22:
+                       /* Family 16h - may change: */
+                       num_counters_nb = NUM_COUNTERS_NB;
+                       num_counters_llc = NUM_COUNTERS_L2;
+                       amd_nb_pmu.name = "amd_nb";
+                       amd_llc_pmu.name = "amd_l2";
+                       format_attr_event_df = format_attr_event;
+                       format_attr_event_l3 = format_attr_event;
+                       break;
+               default:
+                       /*
+                        * All prior families have the same number of
+                        * NorthBridge and Last Level Cache counters
+                        */
+                       num_counters_nb = NUM_COUNTERS_NB;
+                       num_counters_llc = NUM_COUNTERS_L2;
+                       amd_nb_pmu.name = "amd_nb";
+                       amd_llc_pmu.name = "amd_l2";
+                       format_attr_event_df = format_attr_event;
+                       format_attr_event_l3 = format_attr_event;
+                       break;
+       }
+       amd_nb_pmu.attr_groups = amd_uncore_attr_groups_df;
+       amd_llc_pmu.attr_groups = amd_uncore_attr_groups_l3;
+
        if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
                goto fail_nodev;
 
@@ -510,16 +570,16 @@ static int __init amd_uncore_init(void)
        }
 
        if (boot_cpu_has(X86_FEATURE_PERFCTR_L2)) {
-               amd_uncore_l2 = alloc_percpu(struct amd_uncore *);
-               if (!amd_uncore_l2) {
+               amd_uncore_llc = alloc_percpu(struct amd_uncore *);
+               if (!amd_uncore_llc) {
                        ret = -ENOMEM;
-                       goto fail_l2;
+                       goto fail_llc;
                }
-               ret = perf_pmu_register(&amd_l2_pmu, amd_l2_pmu.name, -1);
+               ret = perf_pmu_register(&amd_llc_pmu, amd_llc_pmu.name, -1);
                if (ret)
-                       goto fail_l2;
+                       goto fail_llc;
 
-               pr_info("perf: AMD L2I counters detected\n");
+               pr_info("perf: AMD LLC counters detected\n");
                ret = 0;
        }
 
@@ -529,7 +589,7 @@ static int __init amd_uncore_init(void)
        if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
                              "perf/x86/amd/uncore:prepare",
                              amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
-               goto fail_l2;
+               goto fail_llc;
 
        if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
                              "perf/x86/amd/uncore:starting",
@@ -546,11 +606,11 @@ fail_start:
        cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
 fail_prep:
        cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
-fail_l2:
+fail_llc:
        if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
                perf_pmu_unregister(&amd_nb_pmu);
-       if (amd_uncore_l2)
-               free_percpu(amd_uncore_l2);
+       if (amd_uncore_llc)
+               free_percpu(amd_uncore_llc);
 fail_nb:
        if (amd_uncore_nb)
                free_percpu(amd_uncore_nb);
index 1076c9a77292d77e5dbb34adbd5ce526121daa73..aff4b5b69d4021aeb0ad4356833ca3c2380a7960 100644 (file)
@@ -541,6 +541,9 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
        X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE,  snb_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
 
+       X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE,  snb_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, snb_cstates),
+
        X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates),
        { },
index 1c1b9fe705c84d218cbbf3aeea92edde4bbc40cd..5900471ee50824a6c70b36fb26fce6c311094cb8 100644 (file)
@@ -99,18 +99,24 @@ static struct attribute_group pt_cap_group = {
 };
 
 PMU_FORMAT_ATTR(cyc,           "config:1"      );
+PMU_FORMAT_ATTR(pwr_evt,       "config:4"      );
+PMU_FORMAT_ATTR(fup_on_ptw,    "config:5"      );
 PMU_FORMAT_ATTR(mtc,           "config:9"      );
 PMU_FORMAT_ATTR(tsc,           "config:10"     );
 PMU_FORMAT_ATTR(noretcomp,     "config:11"     );
+PMU_FORMAT_ATTR(ptw,           "config:12"     );
 PMU_FORMAT_ATTR(mtc_period,    "config:14-17"  );
 PMU_FORMAT_ATTR(cyc_thresh,    "config:19-22"  );
 PMU_FORMAT_ATTR(psb_period,    "config:24-27"  );
 
 static struct attribute *pt_formats_attr[] = {
        &format_attr_cyc.attr,
+       &format_attr_pwr_evt.attr,
+       &format_attr_fup_on_ptw.attr,
        &format_attr_mtc.attr,
        &format_attr_tsc.attr,
        &format_attr_noretcomp.attr,
+       &format_attr_ptw.attr,
        &format_attr_mtc_period.attr,
        &format_attr_cyc_thresh.attr,
        &format_attr_psb_period.attr,
index 17c3564d087a48bc24e41417fe6f128b5d7b9f0d..22054ca49026511f6cbe2ee9ce30ca0f4281c78c 100644 (file)
@@ -161,7 +161,13 @@ static u64 rapl_timer_ms;
 
 static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
 {
-       return rapl_pmus->pmus[topology_logical_package_id(cpu)];
+       unsigned int pkgid = topology_logical_package_id(cpu);
+
+       /*
+        * The unsigned check also catches the '-1' return value for non
+        * existent mappings in the topology map.
+        */
+       return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL;
 }
 
 static inline u64 rapl_read_counter(struct perf_event *event)
@@ -402,6 +408,8 @@ static int rapl_pmu_event_init(struct perf_event *event)
 
        /* must be done before validate_group */
        pmu = cpu_to_rapl_pmu(event->cpu);
+       if (!pmu)
+               return -EINVAL;
        event->cpu = pmu->cpu;
        event->pmu_private = pmu;
        event->hw.event_base = msr;
@@ -585,6 +593,20 @@ static int rapl_cpu_online(unsigned int cpu)
        struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
        int target;
 
+       if (!pmu) {
+               pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
+               if (!pmu)
+                       return -ENOMEM;
+
+               raw_spin_lock_init(&pmu->lock);
+               INIT_LIST_HEAD(&pmu->active_list);
+               pmu->pmu = &rapl_pmus->pmu;
+               pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
+               rapl_hrtimer_init(pmu);
+
+               rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
+       }
+
        /*
         * Check if there is an online cpu in the package which collects rapl
         * events already.
@@ -598,27 +620,6 @@ static int rapl_cpu_online(unsigned int cpu)
        return 0;
 }
 
-static int rapl_cpu_prepare(unsigned int cpu)
-{
-       struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
-
-       if (pmu)
-               return 0;
-
-       pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
-       if (!pmu)
-               return -ENOMEM;
-
-       raw_spin_lock_init(&pmu->lock);
-       INIT_LIST_HEAD(&pmu->active_list);
-       pmu->pmu = &rapl_pmus->pmu;
-       pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
-       pmu->cpu = -1;
-       rapl_hrtimer_init(pmu);
-       rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
-       return 0;
-}
-
 static int rapl_check_hw_unit(bool apply_quirk)
 {
        u64 msr_rapl_power_unit_bits;
@@ -770,6 +771,9 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X,       hsx_rapl_init),
 
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE,  skl_rapl_init),
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_rapl_init),
+
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT, hsw_rapl_init),
        {},
 };
@@ -803,29 +807,21 @@ static int __init rapl_pmu_init(void)
        /*
         * Install callbacks. Core will call them for each online cpu.
         */
-
-       ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "perf/x86/rapl:prepare",
-                               rapl_cpu_prepare, NULL);
-       if (ret)
-               goto out;
-
        ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
                                "perf/x86/rapl:online",
                                rapl_cpu_online, rapl_cpu_offline);
        if (ret)
-               goto out1;
+               goto out;
 
        ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
        if (ret)
-               goto out2;
+               goto out1;
 
        rapl_advertise();
        return 0;
 
-out2:
-       cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
 out1:
-       cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP);
+       cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
 out:
        pr_warn("Initialization failed (%d), disabled\n", ret);
        cleanup_rapl_pmus();
@@ -836,7 +832,6 @@ module_init(rapl_pmu_init);
 static void __exit intel_rapl_exit(void)
 {
        cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
-       cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP);
        perf_pmu_unregister(&rapl_pmus->pmu);
        cleanup_rapl_pmus();
 }
index 8c4ccdc3a3f3607ee0af4f4006029df3000e0839..758c1aa5009d2414f3165a31abb6044b6ccf0409 100644 (file)
@@ -100,7 +100,13 @@ ssize_t uncore_event_show(struct kobject *kobj,
 
 struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
 {
-       return pmu->boxes[topology_logical_package_id(cpu)];
+       unsigned int pkgid = topology_logical_package_id(cpu);
+
+       /*
+        * The unsigned check also catches the '-1' return value for non
+        * existent mappings in the topology map.
+        */
+       return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
 }
 
 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
@@ -764,30 +770,6 @@ static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
        pmu->registered = false;
 }
 
-static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
-{
-       struct intel_uncore_pmu *pmu = type->pmus;
-       struct intel_uncore_box *box;
-       int i, pkg;
-
-       if (pmu) {
-               pkg = topology_physical_package_id(cpu);
-               for (i = 0; i < type->num_boxes; i++, pmu++) {
-                       box = pmu->boxes[pkg];
-                       if (box)
-                               uncore_box_exit(box);
-               }
-       }
-}
-
-static void uncore_exit_boxes(void *dummy)
-{
-       struct intel_uncore_type **types;
-
-       for (types = uncore_msr_uncores; *types; types++)
-               __uncore_exit_boxes(*types++, smp_processor_id());
-}
-
 static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
 {
        int pkg;
@@ -1058,86 +1040,6 @@ static void uncore_pci_exit(void)
        }
 }
 
-static int uncore_cpu_dying(unsigned int cpu)
-{
-       struct intel_uncore_type *type, **types = uncore_msr_uncores;
-       struct intel_uncore_pmu *pmu;
-       struct intel_uncore_box *box;
-       int i, pkg;
-
-       pkg = topology_logical_package_id(cpu);
-       for (; *types; types++) {
-               type = *types;
-               pmu = type->pmus;
-               for (i = 0; i < type->num_boxes; i++, pmu++) {
-                       box = pmu->boxes[pkg];
-                       if (box && atomic_dec_return(&box->refcnt) == 0)
-                               uncore_box_exit(box);
-               }
-       }
-       return 0;
-}
-
-static int first_init;
-
-static int uncore_cpu_starting(unsigned int cpu)
-{
-       struct intel_uncore_type *type, **types = uncore_msr_uncores;
-       struct intel_uncore_pmu *pmu;
-       struct intel_uncore_box *box;
-       int i, pkg, ncpus = 1;
-
-       if (first_init) {
-               /*
-                * On init we get the number of online cpus in the package
-                * and set refcount for all of them.
-                */
-               ncpus = cpumask_weight(topology_core_cpumask(cpu));
-       }
-
-       pkg = topology_logical_package_id(cpu);
-       for (; *types; types++) {
-               type = *types;
-               pmu = type->pmus;
-               for (i = 0; i < type->num_boxes; i++, pmu++) {
-                       box = pmu->boxes[pkg];
-                       if (!box)
-                               continue;
-                       /* The first cpu on a package activates the box */
-                       if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
-                               uncore_box_init(box);
-               }
-       }
-
-       return 0;
-}
-
-static int uncore_cpu_prepare(unsigned int cpu)
-{
-       struct intel_uncore_type *type, **types = uncore_msr_uncores;
-       struct intel_uncore_pmu *pmu;
-       struct intel_uncore_box *box;
-       int i, pkg;
-
-       pkg = topology_logical_package_id(cpu);
-       for (; *types; types++) {
-               type = *types;
-               pmu = type->pmus;
-               for (i = 0; i < type->num_boxes; i++, pmu++) {
-                       if (pmu->boxes[pkg])
-                               continue;
-                       /* First cpu of a package allocates the box */
-                       box = uncore_alloc_box(type, cpu_to_node(cpu));
-                       if (!box)
-                               return -ENOMEM;
-                       box->pmu = pmu;
-                       box->pkgid = pkg;
-                       pmu->boxes[pkg] = box;
-               }
-       }
-       return 0;
-}
-
 static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
                                   int new_cpu)
 {
@@ -1177,12 +1079,14 @@ static void uncore_change_context(struct intel_uncore_type **uncores,
 
 static int uncore_event_cpu_offline(unsigned int cpu)
 {
-       int target;
+       struct intel_uncore_type *type, **types = uncore_msr_uncores;
+       struct intel_uncore_pmu *pmu;
+       struct intel_uncore_box *box;
+       int i, pkg, target;
 
        /* Check if exiting cpu is used for collecting uncore events */
        if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
-               return 0;
-
+               goto unref;
        /* Find a new cpu to collect uncore events */
        target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
 
@@ -1194,12 +1098,82 @@ static int uncore_event_cpu_offline(unsigned int cpu)
 
        uncore_change_context(uncore_msr_uncores, cpu, target);
        uncore_change_context(uncore_pci_uncores, cpu, target);
+
+unref:
+       /* Clear the references */
+       pkg = topology_logical_package_id(cpu);
+       for (; *types; types++) {
+               type = *types;
+               pmu = type->pmus;
+               for (i = 0; i < type->num_boxes; i++, pmu++) {
+                       box = pmu->boxes[pkg];
+                       if (box && atomic_dec_return(&box->refcnt) == 0)
+                               uncore_box_exit(box);
+               }
+       }
        return 0;
 }
 
+static int allocate_boxes(struct intel_uncore_type **types,
+                        unsigned int pkg, unsigned int cpu)
+{
+       struct intel_uncore_box *box, *tmp;
+       struct intel_uncore_type *type;
+       struct intel_uncore_pmu *pmu;
+       LIST_HEAD(allocated);
+       int i;
+
+       /* Try to allocate all required boxes */
+       for (; *types; types++) {
+               type = *types;
+               pmu = type->pmus;
+               for (i = 0; i < type->num_boxes; i++, pmu++) {
+                       if (pmu->boxes[pkg])
+                               continue;
+                       box = uncore_alloc_box(type, cpu_to_node(cpu));
+                       if (!box)
+                               goto cleanup;
+                       box->pmu = pmu;
+                       box->pkgid = pkg;
+                       list_add(&box->active_list, &allocated);
+               }
+       }
+       /* Install them in the pmus */
+       list_for_each_entry_safe(box, tmp, &allocated, active_list) {
+               list_del_init(&box->active_list);
+               box->pmu->boxes[pkg] = box;
+       }
+       return 0;
+
+cleanup:
+       list_for_each_entry_safe(box, tmp, &allocated, active_list) {
+               list_del_init(&box->active_list);
+               kfree(box);
+       }
+       return -ENOMEM;
+}
+
 static int uncore_event_cpu_online(unsigned int cpu)
 {
-       int target;
+       struct intel_uncore_type *type, **types = uncore_msr_uncores;
+       struct intel_uncore_pmu *pmu;
+       struct intel_uncore_box *box;
+       int i, ret, pkg, target;
+
+       pkg = topology_logical_package_id(cpu);
+       ret = allocate_boxes(types, pkg, cpu);
+       if (ret)
+               return ret;
+
+       for (; *types; types++) {
+               type = *types;
+               pmu = type->pmus;
+               for (i = 0; i < type->num_boxes; i++, pmu++) {
+                       box = pmu->boxes[pkg];
+                       if (box && atomic_inc_return(&box->refcnt) == 1)
+                               uncore_box_init(box);
+               }
+       }
 
        /*
         * Check if there is an online cpu in the package
@@ -1354,6 +1328,8 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP,skl_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X,      skx_uncore_init),
+       X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE, skl_uncore_init),
+       X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_uncore_init),
        {},
 };
 
@@ -1389,38 +1365,16 @@ static int __init intel_uncore_init(void)
        if (cret && pret)
                return -ENODEV;
 
-       /*
-        * Install callbacks. Core will call them for each online cpu.
-        *
-        * The first online cpu of each package allocates and takes
-        * the refcounts for all other online cpus in that package.
-        * If msrs are not enabled no allocation is required and
-        * uncore_cpu_prepare() is not called for each online cpu.
-        */
-       if (!cret) {
-              ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
-                                      "perf/x86/intel/uncore:prepare",
-                                      uncore_cpu_prepare, NULL);
-               if (ret)
-                       goto err;
-       } else {
-               cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
-                                         "perf/x86/intel/uncore:prepare",
-                                         uncore_cpu_prepare, NULL);
-       }
-       first_init = 1;
-       cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
-                         "perf/x86/uncore:starting",
-                         uncore_cpu_starting, uncore_cpu_dying);
-       first_init = 0;
-       cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
-                         "perf/x86/uncore:online",
-                         uncore_event_cpu_online, uncore_event_cpu_offline);
+       /* Install hotplug callbacks to setup the targets for each package */
+       ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+                               "perf/x86/intel/uncore:online",
+                               uncore_event_cpu_online,
+                               uncore_event_cpu_offline);
+       if (ret)
+               goto err;
        return 0;
 
 err:
-       /* Undo box->init_box() */
-       on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
        uncore_types_exit(uncore_msr_uncores);
        uncore_pci_exit();
        return ret;
@@ -1429,9 +1383,7 @@ module_init(intel_uncore_init);
 
 static void __exit intel_uncore_exit(void)
 {
-       cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
-       cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING);
-       cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP);
+       cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
        uncore_types_exit(uncore_msr_uncores);
        uncore_pci_exit();
 }
index 2b892e2313a9f1251566315743039f48b2b3ad8e..5d6a53fd7521a91f376a57c2e1b83760e785fb7f 100644 (file)
@@ -7,7 +7,6 @@ generated-y += unistd_64_x32.h
 generated-y += xen-hypercalls.h
 
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += dma-contiguous.h
 generic-y += early_ioremap.h
 generic-y += mcs_spinlock.h
index 0c5fbc68e82dd0a583b128f50f10875ac1252d0b..eff8e36aaf72083f9e7a3b53ca6783eebaa6276e 100644 (file)
@@ -195,7 +195,7 @@ static inline void native_apic_msr_write(u32 reg, u32 v)
 
 static inline void native_apic_msr_eoi_write(u32 reg, u32 v)
 {
-       wrmsr_notrace(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
+       __wrmsr(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
 }
 
 static inline u32 native_apic_msr_read(u32 reg)
index eafee3161d1c0fa04cd82b12dcf15003a89d04b4..4e7772387c6e92efc365d271f08ec3fef6208c9d 100644 (file)
 #define X86_FEATURE_XTOPOLOGY  ( 3*32+22) /* cpu topology enum extensions */
 #define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
 #define X86_FEATURE_NONSTOP_TSC        ( 3*32+24) /* TSC does not stop in C states */
-/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */
+#define X86_FEATURE_CPUID      ( 3*32+25) /* CPU has CPUID instruction itself */
 #define X86_FEATURE_EXTD_APICID        ( 3*32+26) /* has extended APICID (8 bits) */
 #define X86_FEATURE_AMD_DCM     ( 3*32+27) /* multi-node processor */
 #define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
  *
  * Reuse free bits when adding new feature flags!
  */
-
+#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT */
 #define X86_FEATURE_CPB                ( 7*32+ 2) /* AMD Core Performance Boost */
 #define X86_FEATURE_EPB                ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
 #define X86_FEATURE_CAT_L3     ( 7*32+ 4) /* Cache Allocation Technology L3 */
 #define X86_FEATURE_AVX512VBMI  (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
 #define X86_FEATURE_PKU                (16*32+ 3) /* Protection Keys for Userspace */
 #define X86_FEATURE_OSPKE      (16*32+ 4) /* OS Protection Keys Enable */
+#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
 #define X86_FEATURE_RDPID      (16*32+ 22) /* RDPID instruction */
 
 /* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */
 #define X86_BUG_SWAPGS_FENCE   X86_BUG(11) /* SWAPGS without input dep on GS */
 #define X86_BUG_MONITOR                X86_BUG(12) /* IPI required to wake up remote CPU */
 #define X86_BUG_AMD_E400       X86_BUG(13) /* CPU is among the affected by Erratum 400 */
-
 #endif /* _ASM_X86_CPUFEATURES_H */
index ced283ac79dfff3ca6c580da509136845ed1ce5b..af95c47d5c9ef6ccb2617226535f63761e8039ef 100644 (file)
@@ -59,6 +59,17 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
 }
 #define div_u64_rem    div_u64_rem
 
+static inline u64 mul_u32_u32(u32 a, u32 b)
+{
+       u32 high, low;
+
+       asm ("mull %[b]" : "=a" (low), "=d" (high)
+                        : [a] "a" (a), [b] "rm" (b) );
+
+       return low | ((u64)high) << 32;
+}
+#define mul_u32_u32 mul_u32_u32
+
 #else
 # include <asm-generic/div64.h>
 #endif /* CONFIG_X86_32 */
index ec23d8e1297cc1a622818e834295783e95791822..67313f3a987401d845e6c9bfe62eede163a49613 100644 (file)
@@ -30,8 +30,6 @@ extern u64 e820_remove_range(u64 start, u64 size, unsigned old_type,
                             int checktype);
 extern void update_e820(void);
 extern void e820_setup_gap(void);
-extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
-                       unsigned long start_addr, unsigned long long end_addr);
 struct setup_data;
 extern void parse_e820_ext(u64 phys_addr, u32 data_len);
 
index e99675b9c861dad48a9c4ba46cc63876255c2748..2f77bcefe6b494cf7dcd5f7158cfe3090893c584 100644 (file)
@@ -191,6 +191,7 @@ static inline efi_status_t efi_thunk_set_virtual_address_map(
 struct efi_config {
        u64 image_handle;
        u64 table;
+       u64 runtime_services;
        u64 boot_services;
        u64 text_output;
        efi_status_t (*call)(unsigned long, ...);
@@ -226,6 +227,10 @@ static inline bool efi_is_64bit(void)
 #define __efi_call_early(f, ...)                                       \
        __efi_early()->call((unsigned long)f, __VA_ARGS__);
 
+#define efi_call_runtime(f, ...)                                       \
+       __efi_early()->call(efi_table_attr(efi_runtime_services, f,     \
+               __efi_early()->runtime_services), __VA_ARGS__)
+
 extern bool efi_reboot_required(void);
 
 #else
index e7f155c3045e1b81d6b60e7af3ae7522e8c0f269..9d49c18b5ea9360feb5e5bb1fe378914154f34d5 100644 (file)
@@ -258,6 +258,15 @@ extern int force_personality32;
 
 #define ELF_HWCAP              (boot_cpu_data.x86_capability[CPUID_1_EDX])
 
+extern u32 elf_hwcap2;
+
+/*
+ * HWCAP2 supplies mask with kernel enabled CPU features, so that
+ * the application can discover that it can safely use them.
+ * The bits are defined in uapi/asm/hwcap2.h.
+ */
+#define ELF_HWCAP2             (elf_hwcap2)
+
 /* This yields a string that ld.so will use to load implementation
    specific libraries for optimization.  This is more specific in
    intent than poking at uname or /proc/cpuinfo.
index d4a684997497bbb431c8d7ead98ed3a438cba0fd..255645f60ca2b4be333de67a10c6780f364e6fb2 100644 (file)
@@ -87,6 +87,16 @@ extern void fpstate_init_soft(struct swregs_state *soft);
 #else
 static inline void fpstate_init_soft(struct swregs_state *soft) {}
 #endif
+
+static inline void fpstate_init_xstate(struct xregs_state *xsave)
+{
+       /*
+        * XRSTORS requires these bits set in xcomp_bv, or it will
+        * trigger #GP:
+        */
+       xsave->header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | xfeatures_mask;
+}
+
 static inline void fpstate_init_fxstate(struct fxregs_state *fx)
 {
        fx->cwd = 0x37f;
index 49da9f497b908b676d9ec0c22c8646817bf1f52b..fe04491130aef0095aa6481fb93fe013415ac6f3 100644 (file)
@@ -27,7 +27,6 @@ extern void intel_mid_pwr_power_off(void);
 extern int intel_mid_pwr_get_lss_id(struct pci_dev *pdev);
 
 extern int get_gpio_by_name(const char *name);
-extern void intel_scu_device_register(struct platform_device *pdev);
 extern int __init sfi_parse_mrtc(struct sfi_table_header *table);
 extern int __init sfi_parse_mtmr(struct sfi_table_header *table);
 extern int sfi_mrtc_num;
@@ -42,10 +41,8 @@ struct devs_id {
        char name[SFI_NAME_LEN + 1];
        u8 type;
        u8 delay;
+       u8 msic;
        void *(*get_platform_data)(void *info);
-       /* Custom handler for devices */
-       void (*device_handler)(struct sfi_device_table_entry *pentry,
-                              struct devs_id *dev);
 };
 
 #define sfi_device(i)                                                          \
index d34bd370074b46662e5a96014ed4cd5b521f6045..7afb0e2f07f40d69ccbe44a70baea52bc70ef685 100644 (file)
@@ -164,6 +164,17 @@ static inline unsigned int isa_virt_to_bus(volatile void *address)
 #define virt_to_bus virt_to_phys
 #define bus_to_virt phys_to_virt
 
+/*
+ * The default ioremap() behavior is non-cached; if you need something
+ * else, you probably want one of the following.
+ */
+extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size);
+#define ioremap_uc ioremap_uc
+
+extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, unsigned long prot_val);
+
 /**
  * ioremap     -   map bus memory into CPU space
  * @offset:    bus address of the memory
@@ -178,17 +189,6 @@ static inline unsigned int isa_virt_to_bus(volatile void *address)
  * If the area you are trying to map is a PCI BAR you should have a
  * look at pci_iomap().
  */
-extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
-extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size);
-#define ioremap_uc ioremap_uc
-
-extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
-extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
-                               unsigned long prot_val);
-
-/*
- * The default ioremap() behavior is non-cached:
- */
 static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
 {
        return ioremap_nocache(offset, size);
@@ -207,18 +207,42 @@ extern void set_iounmap_nonlazy(void);
  */
 #define xlate_dev_kmem_ptr(p)  p
 
+/**
+ * memset_io   Set a range of I/O memory to a constant value
+ * @addr:      The beginning of the I/O-memory range to set
+ * @val:       The value to set the memory to
+ * @count:     The number of bytes to set
+ *
+ * Set a range of I/O memory to a given value.
+ */
 static inline void
 memset_io(volatile void __iomem *addr, unsigned char val, size_t count)
 {
        memset((void __force *)addr, val, count);
 }
 
+/**
+ * memcpy_fromio       Copy a block of data from I/O memory
+ * @dst:               The (RAM) destination for the copy
+ * @src:               The (I/O memory) source for the data
+ * @count:             The number of bytes to copy
+ *
+ * Copy a block of data from I/O memory.
+ */
 static inline void
 memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count)
 {
        memcpy(dst, (const void __force *)src, count);
 }
 
+/**
+ * memcpy_toio         Copy a block of data into I/O memory
+ * @dst:               The (I/O memory) destination for the copy
+ * @src:               The (RAM) source for the data
+ * @count:             The number of bytes to copy
+ *
+ * Copy a block of data to I/O memory.
+ */
 static inline void
 memcpy_toio(volatile void __iomem *dst, const void *src, size_t count)
 {
index 5132f2a6c0a2424016eeebd6fb717da18bbd76cc..e63873683d4a3a79b625699ebf66419d7e2d1de0 100644 (file)
 
 #define MCE_OVERFLOW 0         /* bit 0 in flags means overflow */
 
-/* Software defined banks */
-#define MCE_EXTENDED_BANK      128
-#define MCE_THERMAL_BANK       (MCE_EXTENDED_BANK + 0)
-
 #define MCE_LOG_LEN 32
 #define MCE_LOG_SIGNATURE      "MACHINECHECK"
 
@@ -193,6 +189,15 @@ extern struct mce_vendor_flags mce_flags;
 
 extern struct mca_config mca_cfg;
 extern struct mca_msr_regs msr_ops;
+
+enum mce_notifier_prios {
+       MCE_PRIO_SRAO           = INT_MAX,
+       MCE_PRIO_EXTLOG         = INT_MAX - 1,
+       MCE_PRIO_NFIT           = INT_MAX - 2,
+       MCE_PRIO_EDAC           = INT_MAX - 3,
+       MCE_PRIO_LOWEST         = 0,
+};
+
 extern void mce_register_decode_chain(struct notifier_block *nb);
 extern void mce_unregister_decode_chain(struct notifier_block *nb);
 
@@ -306,8 +311,6 @@ extern void (*deferred_error_int_vector)(void);
 
 void intel_init_thermal(struct cpuinfo_x86 *c);
 
-void mce_log_therm_throt_event(__u64 status);
-
 /* Interrupt Handler for core thermal thresholds */
 extern int (*platform_thermal_notify)(__u64 msr_val);
 
@@ -362,12 +365,13 @@ struct smca_hwid {
        unsigned int bank_type; /* Use with smca_bank_types for easy indexing. */
        u32 hwid_mcatype;       /* (hwid,mcatype) tuple */
        u32 xec_bitmap;         /* Bitmap of valid ExtErrorCodes; current max is 21. */
+       u8 count;               /* Number of instances. */
 };
 
 struct smca_bank {
        struct smca_hwid *hwid;
-       /* Instance ID */
-       u32 id;
+       u32 id;                 /* Value of MCA_IPID[InstanceId]. */
+       u8 sysfs_id;            /* Value used for sysfs name. */
 };
 
 extern struct smca_bank smca_banks[MAX_NR_BANKS];
index 38711df3bcb56b6939f2f84af16b920f0409049b..daadeeea00b1ec97dd04f2eea2c9e2fab50c7086 100644 (file)
@@ -7,18 +7,17 @@
 
 #define native_rdmsr(msr, val1, val2)                  \
 do {                                                   \
-       u64 __val = native_read_msr((msr));             \
+       u64 __val = __rdmsr((msr));                     \
        (void)((val1) = (u32)__val);                    \
        (void)((val2) = (u32)(__val >> 32));            \
 } while (0)
 
 #define native_wrmsr(msr, low, high)                   \
-       native_write_msr(msr, low, high)
+       __wrmsr(msr, low, high)
 
 #define native_wrmsrl(msr, val)                                \
-       native_write_msr((msr),                         \
-                        (u32)((u64)(val)),             \
-                        (u32)((u64)(val) >> 32))
+       __wrmsr((msr), (u32)((u64)(val)),               \
+                      (u32)((u64)(val) >> 32))
 
 struct ucode_patch {
        struct list_head plist;
@@ -140,6 +139,7 @@ extern void __init load_ucode_bsp(void);
 extern void load_ucode_ap(void);
 void reload_early_microcode(void);
 extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
+extern bool initrd_gone;
 #else
 static inline int __init microcode_init(void)                  { return 0; };
 static inline void __init load_ucode_bsp(void)                 { }
index 3e3e20be829a4ea5f3f795aa91016b38b932bb44..3d57009e168bb7241d3c1fc7e24cac6daedea031 100644 (file)
@@ -54,6 +54,4 @@ static inline int __init
 save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
 void reload_ucode_amd(void) {}
 #endif
-
-extern bool check_current_patch_level(u32 *rev, bool early);
 #endif /* _ASM_X86_MICROCODE_AMD_H */
index 710273c617b8d3d735d9aaacdc3349d599717204..00293a94ffaf5a68023022eb6c0b74ba5bac62ef 100644 (file)
 #define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT       39
 #define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE           (1ULL << MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT)
 
+/* MISC_FEATURE_ENABLES non-architectural features */
+#define MSR_MISC_FEATURE_ENABLES       0x00000140
+
+#define MSR_MISC_FEATURE_ENABLES_RING3MWAIT_BIT                1
+
 #define MSR_IA32_TSC_DEADLINE          0x000006E0
 
 /* P4/Xeon+ specific */
index db0b90c3b03e2f092fdcb10c7021e8fb569321bc..898dba2e2e2cd4df06bb4e05298f01a97ff85e7a 100644 (file)
@@ -80,7 +80,14 @@ static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {}
 static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
 #endif
 
-static inline unsigned long long native_read_msr(unsigned int msr)
+/*
+ * __rdmsr() and __wrmsr() are the two primitives which are the bare minimum MSR
+ * accessors and should not have any tracing or other functionality piggybacking
+ * on them - those are *purely* for accessing MSRs and nothing more. So don't even
+ * think of extending them - you will be slapped with a stinking trout or a frozen
+ * shark will reach you, wherever you are! You've been warned.
+ */
+static inline unsigned long long notrace __rdmsr(unsigned int msr)
 {
        DECLARE_ARGS(val, low, high);
 
@@ -88,11 +95,30 @@ static inline unsigned long long native_read_msr(unsigned int msr)
                     "2:\n"
                     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_unsafe)
                     : EAX_EDX_RET(val, low, high) : "c" (msr));
-       if (msr_tracepoint_active(__tracepoint_read_msr))
-               do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), 0);
+
        return EAX_EDX_VAL(val, low, high);
 }
 
+static inline void notrace __wrmsr(unsigned int msr, u32 low, u32 high)
+{
+       asm volatile("1: wrmsr\n"
+                    "2:\n"
+                    _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
+                    : : "c" (msr), "a"(low), "d" (high) : "memory");
+}
+
+static inline unsigned long long native_read_msr(unsigned int msr)
+{
+       unsigned long long val;
+
+       val = __rdmsr(msr);
+
+       if (msr_tracepoint_active(__tracepoint_read_msr))
+               do_trace_read_msr(msr, val, 0);
+
+       return val;
+}
+
 static inline unsigned long long native_read_msr_safe(unsigned int msr,
                                                      int *err)
 {
@@ -114,31 +140,16 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
        return EAX_EDX_VAL(val, low, high);
 }
 
-/* Can be uninlined because referenced by paravirt */
-static inline void notrace
-__native_write_msr_notrace(unsigned int msr, u32 low, u32 high)
-{
-       asm volatile("1: wrmsr\n"
-                    "2:\n"
-                    _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
-                    : : "c" (msr), "a"(low), "d" (high) : "memory");
-}
-
 /* Can be uninlined because referenced by paravirt */
 static inline void notrace
 native_write_msr(unsigned int msr, u32 low, u32 high)
 {
-       __native_write_msr_notrace(msr, low, high);
+       __wrmsr(msr, low, high);
+
        if (msr_tracepoint_active(__tracepoint_write_msr))
                do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
 }
 
-static inline void
-wrmsr_notrace(unsigned int msr, u32 low, u32 high)
-{
-       __native_write_msr_notrace(msr, low, high);
-}
-
 /* Can be uninlined because referenced by paravirt */
 static inline int notrace
 native_write_msr_safe(unsigned int msr, u32 low, u32 high)
index b6c0b404898a71330d8c1cd923bd268dd2a64adc..fbc73360aea05128d6b40be23e261893cd47a523 100644 (file)
@@ -27,6 +27,7 @@ struct vm_area_struct;
 
 extern pgd_t swapper_pg_dir[1024];
 extern pgd_t initial_page_table[1024];
+extern pmd_t initial_pg_pmd[];
 
 static inline void pgtable_cache_init(void) { }
 static inline void check_pgt_cache(void) { }
@@ -75,4 +76,35 @@ do {                                         \
 #define kern_addr_valid(kaddr) (0)
 #endif
 
+/*
+ * This is how much memory in addition to the memory covered up to
+ * and including _end we need mapped initially.
+ * We need:
+ *     (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non PAE)
+ *     (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE)
+ *
+ * Modulo rounding, each megabyte assigned here requires a kilobyte of
+ * memory, which is currently unreclaimed.
+ *
+ * This should be a multiple of a page.
+ *
+ * KERNEL_IMAGE_SIZE should be greater than pa(_end)
+ * and small than max_low_pfn, otherwise will waste some page table entries
+ */
+#if PTRS_PER_PMD > 1
+#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
+#else
+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
+#endif
+
+/*
+ * Number of possible pages in the lowmem region.
+ *
+ * We shift 2 by 31 instead of 1 by 32 to the left in order to avoid a
+ * gas warning about overflowing shift count when gas has been compiled
+ * with only a host target support using a 32-bit type for internal
+ * representation.
+ */
+#define LOWMEM_PAGES ((((2<<31) - __PAGE_OFFSET) >> PAGE_SHIFT))
+
 #endif /* _ASM_X86_PGTABLE_32_H */
index 1be64da0384ed8d5dea85563633c2740f5587888..e6cfe7ba2d65968611d049a136fc1b09506065e1 100644 (file)
@@ -104,6 +104,7 @@ struct cpuinfo_x86 {
        __u8                    x86_phys_bits;
        /* CPUID returned core id bits: */
        __u8                    x86_coreid_bits;
+       __u8                    cu_id;
        /* Max extended CPUID function supported: */
        __u32                   extended_cpuid_level;
        /* Maximum supported CPUID level, -1=no CPUID: */
index 921bea7a2708e2017f12184e09cde6203d6cf0e2..6d391909e8647d0abb33a7a04cb80b7d6c076be9 100644 (file)
@@ -23,9 +23,6 @@
 /* How long a lock should spin before we consider blocking */
 #define SPIN_THRESHOLD (1 << 15)
 
-extern struct static_key paravirt_ticketlocks_enabled;
-static __always_inline bool static_key_false(struct static_key *key);
-
 #include <asm/qspinlock.h>
 
 /*
index 062921ef34e9136100d3b4820826642dd45f223b..6686820feae9e64dfd9145d06d24ebdf104343a7 100644 (file)
@@ -10,6 +10,7 @@ struct mm_struct;
 
 extern enum uv_system_type get_uv_system_type(void);
 extern int is_uv_system(void);
+extern int is_uv_hubless(void);
 extern void uv_cpu_init(void);
 extern void uv_nmi_init(void);
 extern void uv_system_init(void);
@@ -23,6 +24,7 @@ extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 
 static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
 static inline int is_uv_system(void)   { return 0; }
+static inline int is_uv_hubless(void)  { return 0; }
 static inline void uv_cpu_init(void)   { }
 static inline void uv_system_init(void)        { }
 static inline const struct cpumask *
index 097b80c989c4a915f543106f670034a8b99c56d7..72e8300b1e8a6a96eef10a918abb5b3b020014aa 100644 (file)
@@ -772,6 +772,7 @@ static inline int uv_num_possible_blades(void)
 
 /* Per Hub NMI support */
 extern void uv_nmi_setup(void);
+extern void uv_nmi_setup_hubless(void);
 
 /* BMC sets a bit this MMR non-zero before sending an NMI */
 #define UVH_NMI_MMR            UVH_SCRATCH5
@@ -799,6 +800,8 @@ struct uv_hub_nmi_s {
        atomic_t        read_mmr_count; /* count of MMR reads */
        atomic_t        nmi_count;      /* count of true UV NMIs */
        unsigned long   nmi_value;      /* last value read from NMI MMR */
+       bool            hub_present;    /* false means UV hubless system */
+       bool            pch_owner;      /* indicates this hub owns PCH */
 };
 
 struct uv_cpu_nmi_s {
index b10bf319ed206e132e7707f6080da700524450e3..5138dacf8bb8360511f7b3514f8ab0ac1a1e88f8 100644 (file)
@@ -135,7 +135,8 @@ struct boot_params {
        __u8  eddbuf_entries;                           /* 0x1e9 */
        __u8  edd_mbr_sig_buf_entries;                  /* 0x1ea */
        __u8  kbd_status;                               /* 0x1eb */
-       __u8  _pad5[3];                                 /* 0x1ec */
+       __u8  secure_boot;                              /* 0x1ec */
+       __u8  _pad5[2];                                 /* 0x1ed */
        /*
         * The sentinel is set to a nonzero value (0xff) in header.S.
         *
diff --git a/arch/x86/include/uapi/asm/hwcap2.h b/arch/x86/include/uapi/asm/hwcap2.h
new file mode 100644 (file)
index 0000000..0bd2be5
--- /dev/null
@@ -0,0 +1,7 @@
+#ifndef _ASM_X86_HWCAP2_H
+#define _ASM_X86_HWCAP2_H
+
+/* MONITOR/MWAIT enabled in Ring 3 */
+#define HWCAP2_RING3MWAIT              (1 << 0)
+
+#endif
index 581386c7e42953e654ee60fda2feb0941c4e9c99..bdcdb3b3a219308544c0e669ca7dd9831f3f5146 100644 (file)
@@ -101,7 +101,6 @@ obj-$(CONFIG_APB_TIMER)             += apb_timer.o
 
 obj-$(CONFIG_AMD_NB)           += amd_nb.o
 obj-$(CONFIG_DEBUG_RODATA_TEST)        += test_rodata.o
-obj-$(CONFIG_DEBUG_NX_TEST)    += test_nx.o
 obj-$(CONFIG_DEBUG_NMI_SELFTEST) += nmi_selftest.o
 
 obj-$(CONFIG_KVM_GUEST)                += kvm.o kvmclock.o
index 64422f850e95e2da7991ded073041f5df5b4dfae..7ff007ed899d1731c97eba25fc40ccac38a90309 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/bootmem.h>
 #include <linux/ioport.h>
 #include <linux/pci.h>
+#include <linux/efi-bgrt.h>
 
 #include <asm/irqdomain.h>
 #include <asm/pci_x86.h>
@@ -1557,6 +1558,12 @@ int __init early_acpi_boot_init(void)
        return 0;
 }
 
+static int __init acpi_parse_bgrt(struct acpi_table_header *table)
+{
+       efi_bgrt_init(table);
+       return 0;
+}
+
 int __init acpi_boot_init(void)
 {
        /* those are executed after early-quirks are executed */
@@ -1581,6 +1588,8 @@ int __init acpi_boot_init(void)
        acpi_process_madt();
 
        acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);
+       if (IS_ENABLED(CONFIG_ACPI_BGRT))
+               acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt);
 
        if (!acpi_noirq)
                x86_init.pci.init = pci_acpi_init;
index af15f4444330b6c69fe20fc71a993070a9f26a5d..8233a630280f52052ffb9a5f7c89f273cedf0425 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/sched.h>
 
 #include <acpi/processor.h>
-#include <asm/acpi.h>
 #include <asm/mwait.h>
 #include <asm/special_insns.h>
 
@@ -89,7 +88,8 @@ static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
        retval = 0;
        /* If the HW does not support any sub-states in this C-state */
        if (num_cstate_subtype == 0) {
-               pr_warn(FW_BUG "ACPI MWAIT C-state 0x%x not supported by HW (0x%x)\n", cx->address, edx_part);
+               pr_warn(FW_BUG "ACPI MWAIT C-state 0x%x not supported by HW (0x%x)\n",
+                               cx->address, edx_part);
                retval = -1;
                goto out;
        }
@@ -104,8 +104,8 @@ static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
        if (!mwait_supported[cstate_type]) {
                mwait_supported[cstate_type] = 1;
                printk(KERN_DEBUG
-                       "Monitor-Mwait will be used to enter C-%d "
-                       "state\n", cx->type);
+                       "Monitor-Mwait will be used to enter C-%d state\n",
+                       cx->type);
        }
        snprintf(cx->desc,
                        ACPI_CX_DESC_LEN, "ACPI FFH INTEL MWAIT 0x%x",
@@ -166,6 +166,7 @@ EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_enter);
 static int __init ffh_cstate_init(void)
 {
        struct cpuinfo_x86 *c = &boot_cpu_data;
+
        if (c->x86_vendor != X86_VENDOR_INTEL)
                return -1;
 
index 5b7e43eff139b888bb757c5c7ce3172017c5158b..8567c851172c78535e5d6b2fee9c96003155169d 100644 (file)
@@ -529,18 +529,19 @@ static void lapic_timer_broadcast(const struct cpumask *mask)
  * The local apic timer can be used for any function which is CPU local.
  */
 static struct clock_event_device lapic_clockevent = {
-       .name                   = "lapic",
-       .features               = CLOCK_EVT_FEAT_PERIODIC |
-                                 CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP
-                                 | CLOCK_EVT_FEAT_DUMMY,
-       .shift                  = 32,
-       .set_state_shutdown     = lapic_timer_shutdown,
-       .set_state_periodic     = lapic_timer_set_periodic,
-       .set_state_oneshot      = lapic_timer_set_oneshot,
-       .set_next_event         = lapic_next_event,
-       .broadcast              = lapic_timer_broadcast,
-       .rating                 = 100,
-       .irq                    = -1,
+       .name                           = "lapic",
+       .features                       = CLOCK_EVT_FEAT_PERIODIC |
+                                         CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP
+                                         | CLOCK_EVT_FEAT_DUMMY,
+       .shift                          = 32,
+       .set_state_shutdown             = lapic_timer_shutdown,
+       .set_state_periodic             = lapic_timer_set_periodic,
+       .set_state_oneshot              = lapic_timer_set_oneshot,
+       .set_state_oneshot_stopped      = lapic_timer_shutdown,
+       .set_next_event                 = lapic_next_event,
+       .broadcast                      = lapic_timer_broadcast,
+       .rating                         = 100,
+       .irq                            = -1,
 };
 static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
 
@@ -1245,7 +1246,7 @@ static void lapic_setup_esr(void)
 /**
  * setup_local_APIC - setup the local APIC
  *
- * Used to setup local APIC while initializing BSP or bringin up APs.
+ * Used to setup local APIC while initializing BSP or bringing up APs.
  * Always called with preemption disabled.
  */
 void setup_local_APIC(void)
@@ -2028,8 +2029,8 @@ void disconnect_bsp_APIC(int virt_wire_setup)
 /*
  * The number of allocated logical CPU IDs. Since logical CPU IDs are allocated
  * contiguously, it equals to current allocated max logical CPU ID plus 1.
- * All allocated CPU ID should be in [0, nr_logical_cpuidi), so the maximum of
- * nr_logical_cpuids is nr_cpu_ids.
+ * All allocated CPU IDs should be in the [0, nr_logical_cpuids) range,
+ * so the maximum of nr_logical_cpuids is nr_cpu_ids.
  *
  * NOTE: Reserve 0 for BSP.
  */
@@ -2094,7 +2095,7 @@ int __generic_processor_info(int apicid, int version, bool enabled)
         * Since fixing handling of boot_cpu_physical_apicid requires
         * another discussion and tests on each platform, we leave it
         * for now and here we use read_apic_id() directly in this
-        * function, generic_processor_info().
+        * function, __generic_processor_info().
         */
        if (disabled_cpu_apicid != BAD_APICID &&
            disabled_cpu_apicid != read_apic_id() &&
index 1e35dd06b090ee91189cb5a52fdf026f2ca5e74b..347bb9f6573723f84f439fdc7ebcb82c9e00db14 100644 (file)
@@ -1107,12 +1107,12 @@ int mp_map_gsi_to_irq(u32 gsi, unsigned int flags, struct irq_alloc_info *info)
 
        ioapic = mp_find_ioapic(gsi);
        if (ioapic < 0)
-               return -1;
+               return -ENODEV;
 
        pin = mp_find_ioapic_pin(ioapic, gsi);
        idx = find_irq_entry(ioapic, pin, mp_INT);
        if ((flags & IOAPIC_MAP_CHECK) && idx < 0)
-               return -1;
+               return -ENODEV;
 
        return mp_map_pin_to_irq(gsi, idx, ioapic, pin, flags, info);
 }
@@ -2117,6 +2117,7 @@ static inline void __init check_timer(void)
                        if (idx != -1 && irq_trigger(idx))
                                unmask_ioapic_irq(irq_get_chip_data(0));
                }
+               irq_domain_deactivate_irq(irq_data);
                irq_domain_activate_irq(irq_data);
                if (timer_irq_works()) {
                        if (disable_timer_pin_1 > 0)
@@ -2138,6 +2139,7 @@ static inline void __init check_timer(void)
                 * legacy devices should be connected to IO APIC #0
                 */
                replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
+               irq_domain_deactivate_irq(irq_data);
                irq_domain_activate_irq(irq_data);
                legacy_pic->unmask(0);
                if (timer_irq_works()) {
index 35690a168cf7491d960b34c978a9125bba78c6a0..e9f8f8cdd57085db85dee8247b67150250308a42 100644 (file)
 
 DEFINE_PER_CPU(int, x2apic_extra_bits);
 
-#define PR_DEVEL(fmt, args...) pr_devel("%s: " fmt, __func__, args)
-
-static enum uv_system_type uv_system_type;
-static u64 gru_start_paddr, gru_end_paddr;
-static u64 gru_dist_base, gru_first_node_paddr = -1LL, gru_last_node_paddr;
-static u64 gru_dist_lmask, gru_dist_umask;
-static union uvh_apicid uvh_apicid;
-
-/* info derived from CPUID */
+static enum uv_system_type     uv_system_type;
+static bool                    uv_hubless_system;
+static u64                     gru_start_paddr, gru_end_paddr;
+static u64                     gru_dist_base, gru_first_node_paddr = -1LL, gru_last_node_paddr;
+static u64                     gru_dist_lmask, gru_dist_umask;
+static union uvh_apicid                uvh_apicid;
+
+/* Information derived from CPUID: */
 static struct {
        unsigned int apicid_shift;
        unsigned int apicid_mask;
        unsigned int socketid_shift;    /* aka pnode_shift for UV1/2/3 */
        unsigned int pnode_mask;
        unsigned int gpa_shift;
+       unsigned int gnode_shift;
 } uv_cpuid;
 
 int uv_min_hub_revision_id;
 EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
+
 unsigned int uv_apicid_hibits;
 EXPORT_SYMBOL_GPL(uv_apicid_hibits);
 
 static struct apic apic_x2apic_uv_x;
 static struct uv_hub_info_s uv_hub_info_node0;
 
-/* Set this to use hardware error handler instead of kernel panic */
+/* Set this to use hardware error handler instead of kernel panic: */
 static int disable_uv_undefined_panic = 1;
+
 unsigned long uv_undefined(char *str)
 {
        if (likely(!disable_uv_undefined_panic))
                panic("UV: error: undefined MMR: %s\n", str);
        else
                pr_crit("UV: error: undefined MMR: %s\n", str);
-       return ~0ul;    /* cause a machine fault  */
+
+       /* Cause a machine fault: */
+       return ~0ul;
 }
 EXPORT_SYMBOL(uv_undefined);
 
@@ -85,18 +89,19 @@ static unsigned long __init uv_early_read_mmr(unsigned long addr)
        mmr = early_ioremap(UV_LOCAL_MMR_BASE | addr, sizeof(*mmr));
        val = *mmr;
        early_iounmap(mmr, sizeof(*mmr));
+
        return val;
 }
 
 static inline bool is_GRU_range(u64 start, u64 end)
 {
        if (gru_dist_base) {
-               u64 su = start & gru_dist_umask; /* upper (incl pnode) bits */
-               u64 sl = start & gru_dist_lmask; /* base offset bits */
+               u64 su = start & gru_dist_umask; /* Upper (incl pnode) bits */
+               u64 sl = start & gru_dist_lmask; /* Base offset bits */
                u64 eu = end & gru_dist_umask;
                u64 el = end & gru_dist_lmask;
 
-               /* Must reside completely within a single GRU range */
+               /* Must reside completely within a single GRU range: */
                return (sl == gru_dist_base && el == gru_dist_base &&
                        su >= gru_first_node_paddr &&
                        su <= gru_last_node_paddr &&
@@ -133,13 +138,14 @@ static int __init early_get_pnodeid(void)
                break;
        case UV4_HUB_PART_NUMBER:
                uv_min_hub_revision_id += UV4_HUB_REVISION_BASE - 1;
+               uv_cpuid.gnode_shift = 2; /* min partition is 4 sockets */
                break;
        }
 
        uv_hub_info->hub_revision = uv_min_hub_revision_id;
        uv_cpuid.pnode_mask = (1 << m_n_config.s.n_skt) - 1;
        pnode = (node_id.s.node_id >> 1) & uv_cpuid.pnode_mask;
-       uv_cpuid.gpa_shift = 46;        /* default unless changed */
+       uv_cpuid.gpa_shift = 46;        /* Default unless changed */
 
        pr_info("UV: rev:%d part#:%x nodeid:%04x n_skt:%d pnmsk:%x pn:%x\n",
                node_id.s.revision, node_id.s.part_number, node_id.s.node_id,
@@ -147,11 +153,12 @@ static int __init early_get_pnodeid(void)
        return pnode;
 }
 
-/* [copied from arch/x86/kernel/cpu/topology.c:detect_extended_topology()] */
-#define SMT_LEVEL      0       /* leaf 0xb SMT level */
-#define INVALID_TYPE   0       /* leaf 0xb sub-leaf types */
-#define SMT_TYPE       1
-#define CORE_TYPE      2
+/* [Copied from arch/x86/kernel/cpu/topology.c:detect_extended_topology()] */
+
+#define SMT_LEVEL                      0       /* Leaf 0xb SMT level */
+#define INVALID_TYPE                   0       /* Leaf 0xb sub-leaf types */
+#define SMT_TYPE                       1
+#define CORE_TYPE                      2
 #define LEAFB_SUBTYPE(ecx)             (((ecx) >> 8) & 0xff)
 #define BITS_SHIFT_NEXT_LEVEL(eax)     ((eax) & 0x1f)
 
@@ -165,11 +172,13 @@ static void set_x2apic_bits(void)
                pr_info("UV: CPU does not have CPUID.11\n");
                return;
        }
+
        cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
        if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE)) {
                pr_info("UV: CPUID.11 not implemented\n");
                return;
        }
+
        sid_shift = BITS_SHIFT_NEXT_LEVEL(eax);
        sub_index = 1;
        do {
@@ -180,8 +189,9 @@ static void set_x2apic_bits(void)
                }
                sub_index++;
        } while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);
-       uv_cpuid.apicid_shift = 0;
-       uv_cpuid.apicid_mask = (~(-1 << sid_shift));
+
+       uv_cpuid.apicid_shift   = 0;
+       uv_cpuid.apicid_mask    = (~(-1 << sid_shift));
        uv_cpuid.socketid_shift = sid_shift;
 }
 
@@ -192,10 +202,8 @@ static void __init early_get_apic_socketid_shift(void)
 
        set_x2apic_bits();
 
-       pr_info("UV: apicid_shift:%d apicid_mask:0x%x\n",
-               uv_cpuid.apicid_shift, uv_cpuid.apicid_mask);
-       pr_info("UV: socketid_shift:%d pnode_mask:0x%x\n",
-               uv_cpuid.socketid_shift, uv_cpuid.pnode_mask);
+       pr_info("UV: apicid_shift:%d apicid_mask:0x%x\n", uv_cpuid.apicid_shift, uv_cpuid.apicid_mask);
+       pr_info("UV: socketid_shift:%d pnode_mask:0x%x\n", uv_cpuid.socketid_shift, uv_cpuid.pnode_mask);
 }
 
 /*
@@ -208,10 +216,8 @@ static void __init uv_set_apicid_hibit(void)
        union uv1h_lb_target_physical_apic_id_mask_u apicid_mask;
 
        if (is_uv1_hub()) {
-               apicid_mask.v =
-                       uv_early_read_mmr(UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK);
-               uv_apicid_hibits =
-                       apicid_mask.s1.bit_enables & UV_APICID_HIBIT_MASK;
+               apicid_mask.v = uv_early_read_mmr(UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK);
+               uv_apicid_hibits = apicid_mask.s1.bit_enables & UV_APICID_HIBIT_MASK;
        }
 }
 
@@ -220,20 +226,26 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
        int pnodeid;
        int uv_apic;
 
-       if (strncmp(oem_id, "SGI", 3) != 0)
+       if (strncmp(oem_id, "SGI", 3) != 0) {
+               if (strncmp(oem_id, "NSGI", 4) == 0) {
+                       uv_hubless_system = true;
+                       pr_info("UV: OEM IDs %s/%s, HUBLESS\n",
+                               oem_id, oem_table_id);
+               }
                return 0;
+       }
 
        if (numa_off) {
                pr_err("UV: NUMA is off, disabling UV support\n");
                return 0;
        }
 
-       /* Setup early hub type field in uv_hub_info for Node 0 */
+       /* Set up early hub type field in uv_hub_info for Node 0 */
        uv_cpu_info->p_uv_hub_info = &uv_hub_info_node0;
 
        /*
         * Determine UV arch type.
-        *   SGI: UV100/1000
+        *   SGI:  UV100/1000
         *   SGI2: UV2000/3000
         *   SGI3: UV300 (truncated to 4 chars because of different varieties)
         *   SGI4: UV400 (truncated to 4 chars because of different varieties)
@@ -249,31 +261,32 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 
        pnodeid = early_get_pnodeid();
        early_get_apic_socketid_shift();
-       x86_platform.is_untracked_pat_range =  uv_is_untracked_pat_range;
+
+       x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
        x86_platform.nmi_init = uv_nmi_init;
 
-       if (!strcmp(oem_table_id, "UVX")) {             /* most common */
+       if (!strcmp(oem_table_id, "UVX")) {
+               /* This is the most common hardware variant: */
                uv_system_type = UV_X2APIC;
                uv_apic = 0;
 
-       } else if (!strcmp(oem_table_id, "UVH")) {      /* only UV1 systems */
+       } else if (!strcmp(oem_table_id, "UVH")) {
+               /* Only UV1 systems: */
                uv_system_type = UV_NON_UNIQUE_APIC;
-               __this_cpu_write(x2apic_extra_bits,
-                       pnodeid << uvh_apicid.s.pnode_shift);
+               __this_cpu_write(x2apic_extra_bits, pnodeid << uvh_apicid.s.pnode_shift);
                uv_set_apicid_hibit();
                uv_apic = 1;
 
-       } else  if (!strcmp(oem_table_id, "UVL")) {     /* only used for */
-               uv_system_type = UV_LEGACY_APIC;        /* very small systems */
+       } else if (!strcmp(oem_table_id, "UVL")) {
+               /* Only used for very small systems:  */
+               uv_system_type = UV_LEGACY_APIC;
                uv_apic = 0;
 
        } else {
                goto badbios;
        }
 
-       pr_info("UV: OEM IDs %s/%s, System/HUB Types %d/%d, uv_apic %d\n",
-               oem_id, oem_table_id, uv_system_type,
-               uv_min_hub_revision_id, uv_apic);
+       pr_info("UV: OEM IDs %s/%s, System/HUB Types %d/%d, uv_apic %d\n", oem_id, oem_table_id, uv_system_type, uv_min_hub_revision_id, uv_apic);
 
        return uv_apic;
 
@@ -294,6 +307,12 @@ int is_uv_system(void)
 }
 EXPORT_SYMBOL_GPL(is_uv_system);
 
+int is_uv_hubless(void)
+{
+       return uv_hubless_system;
+}
+EXPORT_SYMBOL_GPL(is_uv_hubless);
+
 void **__uv_hub_info_list;
 EXPORT_SYMBOL_GPL(__uv_hub_info_list);
 
@@ -306,16 +325,18 @@ EXPORT_SYMBOL_GPL(uv_possible_blades);
 unsigned long sn_rtc_cycles_per_second;
 EXPORT_SYMBOL(sn_rtc_cycles_per_second);
 
-/* the following values are used for the per node hub info struct */
-static __initdata unsigned short *_node_to_pnode;
-static __initdata unsigned short _min_socket, _max_socket;
-static __initdata unsigned short _min_pnode, _max_pnode, _gr_table_len;
-static __initdata struct uv_gam_range_entry *uv_gre_table;
-static __initdata struct uv_gam_parameters *uv_gp_table;
-static __initdata unsigned short *_socket_to_node;
-static __initdata unsigned short *_socket_to_pnode;
-static __initdata unsigned short *_pnode_to_socket;
-static __initdata struct uv_gam_range_s *_gr_table;
+/* The following values are used for the per node hub info struct */
+static __initdata unsigned short               *_node_to_pnode;
+static __initdata unsigned short               _min_socket, _max_socket;
+static __initdata unsigned short               _min_pnode, _max_pnode, _gr_table_len;
+static __initdata struct uv_gam_range_entry    *uv_gre_table;
+static __initdata struct uv_gam_parameters     *uv_gp_table;
+static __initdata unsigned short               *_socket_to_node;
+static __initdata unsigned short               *_socket_to_pnode;
+static __initdata unsigned short               *_pnode_to_socket;
+
+static __initdata struct uv_gam_range_s                *_gr_table;
+
 #define        SOCK_EMPTY      ((unsigned short)~0)
 
 extern int uv_hub_info_version(void)
@@ -324,7 +345,7 @@ extern int uv_hub_info_version(void)
 }
 EXPORT_SYMBOL(uv_hub_info_version);
 
-/* Build GAM range lookup table */
+/* Build GAM range lookup table: */
 static __init void build_uv_gr_table(void)
 {
        struct uv_gam_range_entry *gre = uv_gre_table;
@@ -342,25 +363,24 @@ static __init void build_uv_gr_table(void)
 
        for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) {
                if (gre->type == UV_GAM_RANGE_TYPE_HOLE) {
-                       if (!ram_limit) {   /* mark hole between ram/non-ram */
+                       if (!ram_limit) {
+                               /* Mark hole between RAM/non-RAM: */
                                ram_limit = last_limit;
                                last_limit = gre->limit;
                                lsid++;
                                continue;
                        }
                        last_limit = gre->limit;
-                       pr_info("UV: extra hole in GAM RE table @%d\n",
-                               (int)(gre - uv_gre_table));
+                       pr_info("UV: extra hole in GAM RE table @%d\n", (int)(gre - uv_gre_table));
                        continue;
                }
                if (_max_socket < gre->sockid) {
-                       pr_err("UV: GAM table sockid(%d) too large(>%d) @%d\n",
-                               gre->sockid, _max_socket,
-                               (int)(gre - uv_gre_table));
+                       pr_err("UV: GAM table sockid(%d) too large(>%d) @%d\n", gre->sockid, _max_socket, (int)(gre - uv_gre_table));
                        continue;
                }
                sid = gre->sockid - _min_socket;
-               if (lsid < sid) {               /* new range */
+               if (lsid < sid) {
+                       /* New range: */
                        grt = &_gr_table[indx];
                        grt->base = lindx;
                        grt->nasid = gre->nasid;
@@ -369,27 +389,32 @@ static __init void build_uv_gr_table(void)
                        lindx = indx++;
                        continue;
                }
-               if (lsid == sid && !ram_limit) {        /* update range */
-                       if (grt->limit == last_limit) { /* .. if contiguous */
+               /* Update range: */
+               if (lsid == sid && !ram_limit) {
+                       /* .. if contiguous: */
+                       if (grt->limit == last_limit) {
                                grt->limit = last_limit = gre->limit;
                                continue;
                        }
                }
-               if (!ram_limit) {               /* non-contiguous ram range */
+               /* Non-contiguous RAM range: */
+               if (!ram_limit) {
                        grt++;
                        grt->base = lindx;
                        grt->nasid = gre->nasid;
                        grt->limit = last_limit = gre->limit;
                        continue;
                }
-               grt++;                          /* non-contiguous/non-ram */
-               grt->base = grt - _gr_table;    /* base is this entry */
+               /* Non-contiguous/non-RAM: */
+               grt++;
+               /* base is this entry */
+               grt->base = grt - _gr_table;
                grt->nasid = gre->nasid;
                grt->limit = last_limit = gre->limit;
                lsid++;
        }
 
-       /* shorten table if possible */
+       /* Shorten table if possible */
        grt++;
        i = grt - _gr_table;
        if (i < _gr_table_len) {
@@ -403,16 +428,15 @@ static __init void build_uv_gr_table(void)
                }
        }
 
-       /* display resultant gam range table */
+       /* Display resultant GAM range table: */
        for (i = 0, grt = _gr_table; i < _gr_table_len; i++, grt++) {
+               unsigned long start, end;
                int gb = grt->base;
-               unsigned long start = gb < 0 ?  0 :
-                       (unsigned long)_gr_table[gb].limit << UV_GAM_RANGE_SHFT;
-               unsigned long end =
-                       (unsigned long)grt->limit << UV_GAM_RANGE_SHFT;
 
-               pr_info("UV: GAM Range %2d %04x 0x%013lx-0x%013lx (%d)\n",
-                       i, grt->nasid, start, end, gb);
+               start = gb < 0 ?  0 : (unsigned long)_gr_table[gb].limit << UV_GAM_RANGE_SHFT;
+               end = (unsigned long)grt->limit << UV_GAM_RANGE_SHFT;
+
+               pr_info("UV: GAM Range %2d %04x 0x%013lx-0x%013lx (%d)\n", i, grt->nasid, start, end, gb);
        }
 }
 
@@ -423,16 +447,19 @@ static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
 
        pnode = uv_apicid_to_pnode(phys_apicid);
        phys_apicid |= uv_apicid_hibits;
+
        val = (1UL << UVH_IPI_INT_SEND_SHFT) |
            (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
            ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
            APIC_DM_INIT;
+
        uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
 
        val = (1UL << UVH_IPI_INT_SEND_SHFT) |
            (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
            ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
            APIC_DM_STARTUP;
+
        uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
 
        return 0;
@@ -566,7 +593,7 @@ static struct apic apic_x2apic_uv_x __ro_after_init = {
        .apic_id_registered             = uv_apic_id_registered,
 
        .irq_delivery_mode              = dest_Fixed,
-       .irq_dest_mode                  = 0, /* physical */
+       .irq_dest_mode                  = 0, /* Physical */
 
        .target_cpus                    = online_target_cpus,
        .disable_esr                    = 0,
@@ -627,23 +654,22 @@ static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
                switch (i) {
                case 0:
                        m_redirect = UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR;
-                       m_overlay = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR;
+                       m_overlay  = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR;
                        break;
                case 1:
                        m_redirect = UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR;
-                       m_overlay = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR;
+                       m_overlay  = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR;
                        break;
                case 2:
                        m_redirect = UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR;
-                       m_overlay = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR;
+                       m_overlay  = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR;
                        break;
                }
                alias.v = uv_read_local_mmr(m_overlay);
                if (alias.s.enable && alias.s.base == 0) {
                        *size = (1UL << alias.s.m_alias);
                        redirect.v = uv_read_local_mmr(m_redirect);
-                       *base = (unsigned long)redirect.s.dest_base
-                                                       << DEST_SHIFT;
+                       *base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
                        return;
                }
        }
@@ -652,8 +678,7 @@ static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
 
 enum map_type {map_wb, map_uc};
 
-static __init void map_high(char *id, unsigned long base, int pshift,
-                       int bshift, int max_pnode, enum map_type map_type)
+static __init void map_high(char *id, unsigned long base, int pshift, int bshift, int max_pnode, enum map_type map_type)
 {
        unsigned long bytes, paddr;
 
@@ -678,16 +703,19 @@ static __init void map_gru_distributed(unsigned long c)
        int nid;
 
        gru.v = c;
-       /* only base bits 42:28 relevant in dist mode */
+
+       /* Only base bits 42:28 relevant in dist mode */
        gru_dist_base = gru.v & 0x000007fff0000000UL;
        if (!gru_dist_base) {
                pr_info("UV: Map GRU_DIST base address NULL\n");
                return;
        }
+
        bytes = 1UL << UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;
        gru_dist_lmask = ((1UL << uv_hub_info->m_val) - 1) & ~(bytes - 1);
        gru_dist_umask = ~((1UL << uv_hub_info->m_val) - 1);
        gru_dist_base &= gru_dist_lmask; /* Clear bits above M */
+
        for_each_online_node(nid) {
                paddr = ((u64)uv_node_to_pnode(nid) << uv_hub_info->m_val) |
                                gru_dist_base;
@@ -695,11 +723,12 @@ static __init void map_gru_distributed(unsigned long c)
                gru_first_node_paddr = min(paddr, gru_first_node_paddr);
                gru_last_node_paddr = max(paddr, gru_last_node_paddr);
        }
+
        /* Save upper (63:M) bits of address only for is_GRU_range */
        gru_first_node_paddr &= gru_dist_umask;
        gru_last_node_paddr &= gru_dist_umask;
-       pr_debug("UV: Map GRU_DIST base 0x%016llx  0x%016llx - 0x%016llx\n",
-               gru_dist_base, gru_first_node_paddr, gru_last_node_paddr);
+
+       pr_debug("UV: Map GRU_DIST base 0x%016llx  0x%016llx - 0x%016llx\n", gru_dist_base, gru_first_node_paddr, gru_last_node_paddr);
 }
 
 static __init void map_gru_high(int max_pnode)
@@ -719,6 +748,7 @@ static __init void map_gru_high(int max_pnode)
                map_gru_distributed(gru.v);
                return;
        }
+
        base = (gru.v & mask) >> shift;
        map_high("GRU", base, shift, shift, max_pnode, map_wb);
        gru_start_paddr = ((u64)base << shift);
@@ -772,8 +802,8 @@ static __init void map_mmioh_high_uv3(int index, int min_pnode, int max_pnode)
 
        id = mmiohs[index].id;
        overlay.v = uv_read_local_mmr(mmiohs[index].overlay);
-       pr_info("UV: %s overlay 0x%lx base:0x%x m_io:%d\n",
-               id, overlay.v, overlay.s3.base, overlay.s3.m_io);
+
+       pr_info("UV: %s overlay 0x%lx base:0x%x m_io:%d\n", id, overlay.v, overlay.s3.base, overlay.s3.m_io);
        if (!overlay.s3.enable) {
                pr_info("UV: %s disabled\n", id);
                return;
@@ -784,7 +814,8 @@ static __init void map_mmioh_high_uv3(int index, int min_pnode, int max_pnode)
        m_io = overlay.s3.m_io;
        mmr = mmiohs[index].redirect;
        n = UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH;
-       min_pnode *= 2;                         /* convert to NASID */
+       /* Convert to NASID: */
+       min_pnode *= 2;
        max_pnode *= 2;
        max_io = lnasid = fi = li = -1;
 
@@ -793,16 +824,18 @@ static __init void map_mmioh_high_uv3(int index, int min_pnode, int max_pnode)
 
                redirect.v = uv_read_local_mmr(mmr + i * 8);
                nasid = redirect.s3.nasid;
+               /* Invalid NASID: */
                if (nasid < min_pnode || max_pnode < nasid)
-                       nasid = -1;             /* invalid NASID */
+                       nasid = -1;
 
                if (nasid == lnasid) {
                        li = i;
-                       if (i != n-1)           /* last entry check */
+                       /* Last entry check: */
+                       if (i != n-1)
                                continue;
                }
 
-               /* check if we have a cached (or last) redirect to print */
+               /* Check if we have a cached (or last) redirect to print: */
                if (lnasid != -1 || (i == n-1 && nasid != -1))  {
                        unsigned long addr1, addr2;
                        int f, l;
@@ -814,12 +847,9 @@ static __init void map_mmioh_high_uv3(int index, int min_pnode, int max_pnode)
                                f = fi;
                                l = li;
                        }
-                       addr1 = (base << shift) +
-                               f * (1ULL << m_io);
-                       addr2 = (base << shift) +
-                               (l + 1) * (1ULL << m_io);
-                       pr_info("UV: %s[%03d..%03d] NASID 0x%04x ADDR 0x%016lx - 0x%016lx\n",
-                               id, fi, li, lnasid, addr1, addr2);
+                       addr1 = (base << shift) + f * (1ULL << m_io);
+                       addr2 = (base << shift) + (l + 1) * (1ULL << m_io);
+                       pr_info("UV: %s[%03d..%03d] NASID 0x%04x ADDR 0x%016lx - 0x%016lx\n", id, fi, li, lnasid, addr1, addr2);
                        if (max_io < l)
                                max_io = l;
                }
@@ -827,8 +857,7 @@ static __init void map_mmioh_high_uv3(int index, int min_pnode, int max_pnode)
                lnasid = nasid;
        }
 
-       pr_info("UV: %s base:0x%lx shift:%d M_IO:%d MAX_IO:%d\n",
-               id, base, shift, m_io, max_io);
+       pr_info("UV: %s base:0x%lx shift:%d M_IO:%d MAX_IO:%d\n", id, base, shift, m_io, max_io);
 
        if (max_io >= 0)
                map_high(id, base, shift, m_io, max_io, map_uc);
@@ -841,36 +870,35 @@ static __init void map_mmioh_high(int min_pnode, int max_pnode)
        int shift, enable, m_io, n_io;
 
        if (is_uv3_hub() || is_uv4_hub()) {
-               /* Map both MMIOH Regions */
+               /* Map both MMIOH regions: */
                map_mmioh_high_uv3(0, min_pnode, max_pnode);
                map_mmioh_high_uv3(1, min_pnode, max_pnode);
                return;
        }
 
        if (is_uv1_hub()) {
-               mmr = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR;
-               shift = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
-               mmioh.v = uv_read_local_mmr(mmr);
-               enable = !!mmioh.s1.enable;
-               base = mmioh.s1.base;
-               m_io = mmioh.s1.m_io;
-               n_io = mmioh.s1.n_io;
+               mmr     = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR;
+               shift   = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
+               mmioh.v = uv_read_local_mmr(mmr);
+               enable  = !!mmioh.s1.enable;
+               base    = mmioh.s1.base;
+               m_io    = mmioh.s1.m_io;
+               n_io    = mmioh.s1.n_io;
        } else if (is_uv2_hub()) {
-               mmr = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR;
-               shift = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
-               mmioh.v = uv_read_local_mmr(mmr);
-               enable = !!mmioh.s2.enable;
-               base = mmioh.s2.base;
-               m_io = mmioh.s2.m_io;
-               n_io = mmioh.s2.n_io;
-       } else
+               mmr     = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR;
+               shift   = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
+               mmioh.v = uv_read_local_mmr(mmr);
+               enable  = !!mmioh.s2.enable;
+               base    = mmioh.s2.base;
+               m_io    = mmioh.s2.m_io;
+               n_io    = mmioh.s2.n_io;
+       } else {
                return;
+       }
 
        if (enable) {
                max_pnode &= (1 << n_io) - 1;
-               pr_info(
-                   "UV: base:0x%lx shift:%d N_IO:%d M_IO:%d max_pnode:0x%x\n",
-                       base, shift, m_io, n_io, max_pnode);
+               pr_info("UV: base:0x%lx shift:%d N_IO:%d M_IO:%d max_pnode:0x%x\n", base, shift, m_io, n_io, max_pnode);
                map_high("MMIOH", base, shift, m_io, max_pnode, map_uc);
        } else {
                pr_info("UV: MMIOH disabled\n");
@@ -888,16 +916,16 @@ static __init void uv_rtc_init(void)
        long status;
        u64 ticks_per_sec;
 
-       status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
-                                       &ticks_per_sec);
+       status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec);
+
        if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) {
-               printk(KERN_WARNING
-                       "unable to determine platform RTC clock frequency, "
-                       "guessing.\n");
-               /* BIOS gives wrong value for clock freq. so guess */
+               pr_warn("UV: unable to determine platform RTC clock frequency, guessing.\n");
+
+               /* BIOS gives wrong value for clock frequency, so guess: */
                sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
-       } else
+       } else {
                sn_rtc_cycles_per_second = ticks_per_sec;
+       }
 }
 
 /*
@@ -908,19 +936,19 @@ static void uv_heartbeat(unsigned long ignored)
        struct timer_list *timer = &uv_scir_info->timer;
        unsigned char bits = uv_scir_info->state;
 
-       /* flip heartbeat bit */
+       /* Flip heartbeat bit: */
        bits ^= SCIR_CPU_HEARTBEAT;
 
-       /* is this cpu idle? */
+       /* Is this CPU idle? */
        if (idle_cpu(raw_smp_processor_id()))
                bits &= ~SCIR_CPU_ACTIVITY;
        else
                bits |= SCIR_CPU_ACTIVITY;
 
-       /* update system controller interface reg */
+       /* Update system controller interface reg: */
        uv_set_scir_bits(bits);
 
-       /* enable next timer period */
+       /* Enable next timer period: */
        mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL);
 }
 
@@ -935,7 +963,7 @@ static int uv_heartbeat_enable(unsigned int cpu)
                add_timer_on(timer, cpu);
                uv_cpu_scir_info(cpu)->enabled = 1;
 
-               /* also ensure that boot cpu is enabled */
+               /* Also ensure that boot CPU is enabled: */
                cpu = 0;
        }
        return 0;
@@ -968,9 +996,11 @@ static __init int uv_init_heartbeat(void)
 {
        int cpu;
 
-       if (is_uv_system())
+       if (is_uv_system()) {
                for_each_online_cpu(cpu)
                        uv_heartbeat_enable(cpu);
+       }
+
        return 0;
 }
 
@@ -979,14 +1009,10 @@ late_initcall(uv_init_heartbeat);
 #endif /* !CONFIG_HOTPLUG_CPU */
 
 /* Direct Legacy VGA I/O traffic to designated IOH */
-int uv_set_vga_state(struct pci_dev *pdev, bool decode,
-                     unsigned int command_bits, u32 flags)
+int uv_set_vga_state(struct pci_dev *pdev, bool decode, unsigned int command_bits, u32 flags)
 {
        int domain, bus, rc;
 
-       PR_DEVEL("devfn %x decode %d cmd %x flags %d\n",
-                       pdev->devfn, decode, command_bits, flags);
-
        if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
                return 0;
 
@@ -997,13 +1023,12 @@ int uv_set_vga_state(struct pci_dev *pdev, bool decode,
        bus = pdev->bus->number;
 
        rc = uv_bios_set_legacy_vga_target(decode, domain, bus);
-       PR_DEVEL("vga decode %d %x:%x, rc: %d\n", decode, domain, bus, rc);
 
        return rc;
 }
 
 /*
- * Called on each cpu to initialize the per_cpu UV data area.
+ * Called on each CPU to initialize the per_cpu UV data area.
  * FIXME: hotplug not supported yet
  */
 void uv_cpu_init(void)
@@ -1030,90 +1055,79 @@ static void get_mn(struct mn *mnp)
        union uvh_rh_gam_config_mmr_u m_n_config;
        union uv3h_gr0_gam_gr_config_u m_gr_config;
 
-       m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR);
-       mnp->n_val = m_n_config.s.n_skt;
+       /* Make sure the whole structure is well initialized: */
+       memset(mnp, 0, sizeof(*mnp));
+
+       m_n_config.v    = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR);
+       mnp->n_val      = m_n_config.s.n_skt;
+
        if (is_uv4_hub()) {
-               mnp->m_val = 0;
-               mnp->n_lshift = 0;
+               mnp->m_val      = 0;
+               mnp->n_lshift   = 0;
        } else if (is_uv3_hub()) {
-               mnp->m_val = m_n_config.s3.m_skt;
-               m_gr_config.v = uv_read_local_mmr(UV3H_GR0_GAM_GR_CONFIG);
-               mnp->n_lshift = m_gr_config.s3.m_skt;
+               mnp->m_val      = m_n_config.s3.m_skt;
+               m_gr_config.v   = uv_read_local_mmr(UV3H_GR0_GAM_GR_CONFIG);
+               mnp->n_lshift   = m_gr_config.s3.m_skt;
        } else if (is_uv2_hub()) {
-               mnp->m_val = m_n_config.s2.m_skt;
-               mnp->n_lshift = mnp->m_val == 40 ? 40 : 39;
+               mnp->m_val      = m_n_config.s2.m_skt;
+               mnp->n_lshift   = mnp->m_val == 40 ? 40 : 39;
        } else if (is_uv1_hub()) {
-               mnp->m_val = m_n_config.s1.m_skt;
-               mnp->n_lshift = mnp->m_val;
+               mnp->m_val      = m_n_config.s1.m_skt;
+               mnp->n_lshift   = mnp->m_val;
        }
        mnp->m_shift = mnp->m_val ? 64 - mnp->m_val : 0;
 }
 
-void __init uv_init_hub_info(struct uv_hub_info_s *hub_info)
+void __init uv_init_hub_info(struct uv_hub_info_s *hi)
 {
-       struct mn mn = {0};     /* avoid unitialized warnings */
        union uvh_node_id_u node_id;
+       struct mn mn;
 
        get_mn(&mn);
-       hub_info->m_val = mn.m_val;
-       hub_info->n_val = mn.n_val;
-       hub_info->m_shift = mn.m_shift;
-       hub_info->n_lshift = mn.n_lshift ? mn.n_lshift : 0;
-
-       hub_info->hub_revision = uv_hub_info->hub_revision;
-       hub_info->pnode_mask = uv_cpuid.pnode_mask;
-       hub_info->min_pnode = _min_pnode;
-       hub_info->min_socket = _min_socket;
-       hub_info->pnode_to_socket = _pnode_to_socket;
-       hub_info->socket_to_node = _socket_to_node;
-       hub_info->socket_to_pnode = _socket_to_pnode;
-       hub_info->gr_table_len = _gr_table_len;
-       hub_info->gr_table = _gr_table;
-       hub_info->gpa_mask = mn.m_val ?
+       hi->gpa_mask = mn.m_val ?
                (1UL << (mn.m_val + mn.n_val)) - 1 :
                (1UL << uv_cpuid.gpa_shift) - 1;
 
-       node_id.v = uv_read_local_mmr(UVH_NODE_ID);
-       hub_info->gnode_extra =
-               (node_id.s.node_id & ~((1 << mn.n_val) - 1)) >> 1;
-
-       hub_info->gnode_upper =
-               ((unsigned long)hub_info->gnode_extra << mn.m_val);
+       hi->m_val               = mn.m_val;
+       hi->n_val               = mn.n_val;
+       hi->m_shift             = mn.m_shift;
+       hi->n_lshift            = mn.n_lshift ? mn.n_lshift : 0;
+       hi->hub_revision        = uv_hub_info->hub_revision;
+       hi->pnode_mask          = uv_cpuid.pnode_mask;
+       hi->min_pnode           = _min_pnode;
+       hi->min_socket          = _min_socket;
+       hi->pnode_to_socket     = _pnode_to_socket;
+       hi->socket_to_node      = _socket_to_node;
+       hi->socket_to_pnode     = _socket_to_pnode;
+       hi->gr_table_len        = _gr_table_len;
+       hi->gr_table            = _gr_table;
+
+       node_id.v               = uv_read_local_mmr(UVH_NODE_ID);
+       uv_cpuid.gnode_shift    = max_t(unsigned int, uv_cpuid.gnode_shift, mn.n_val);
+       hi->gnode_extra         = (node_id.s.node_id & ~((1 << uv_cpuid.gnode_shift) - 1)) >> 1;
+       hi->gnode_upper         = (unsigned long)hi->gnode_extra << mn.m_val;
 
        if (uv_gp_table) {
-               hub_info->global_mmr_base = uv_gp_table->mmr_base;
-               hub_info->global_mmr_shift = uv_gp_table->mmr_shift;
-               hub_info->global_gru_base = uv_gp_table->gru_base;
-               hub_info->global_gru_shift = uv_gp_table->gru_shift;
-               hub_info->gpa_shift = uv_gp_table->gpa_shift;
-               hub_info->gpa_mask = (1UL << hub_info->gpa_shift) - 1;
+               hi->global_mmr_base     = uv_gp_table->mmr_base;
+               hi->global_mmr_shift    = uv_gp_table->mmr_shift;
+               hi->global_gru_base     = uv_gp_table->gru_base;
+               hi->global_gru_shift    = uv_gp_table->gru_shift;
+               hi->gpa_shift           = uv_gp_table->gpa_shift;
+               hi->gpa_mask            = (1UL << hi->gpa_shift) - 1;
        } else {
-               hub_info->global_mmr_base =
-                       uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
-                                       ~UV_MMR_ENABLE;
-               hub_info->global_mmr_shift = _UV_GLOBAL_MMR64_PNODE_SHIFT;
+               hi->global_mmr_base     = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) & ~UV_MMR_ENABLE;
+               hi->global_mmr_shift    = _UV_GLOBAL_MMR64_PNODE_SHIFT;
        }
 
-       get_lowmem_redirect(
-               &hub_info->lowmem_remap_base, &hub_info->lowmem_remap_top);
-
-       hub_info->apic_pnode_shift = uv_cpuid.socketid_shift;
-
-       /* show system specific info */
-       pr_info("UV: N:%d M:%d m_shift:%d n_lshift:%d\n",
-               hub_info->n_val, hub_info->m_val,
-               hub_info->m_shift, hub_info->n_lshift);
-
-       pr_info("UV: gpa_mask/shift:0x%lx/%d pnode_mask:0x%x apic_pns:%d\n",
-               hub_info->gpa_mask, hub_info->gpa_shift,
-               hub_info->pnode_mask, hub_info->apic_pnode_shift);
+       get_lowmem_redirect(&hi->lowmem_remap_base, &hi->lowmem_remap_top);
 
-       pr_info("UV: mmr_base/shift:0x%lx/%ld gru_base/shift:0x%lx/%ld\n",
-               hub_info->global_mmr_base, hub_info->global_mmr_shift,
-               hub_info->global_gru_base, hub_info->global_gru_shift);
+       hi->apic_pnode_shift = uv_cpuid.socketid_shift;
 
-       pr_info("UV: gnode_upper:0x%lx gnode_extra:0x%x\n",
-               hub_info->gnode_upper, hub_info->gnode_extra);
+       /* Show system specific info: */
+       pr_info("UV: N:%d M:%d m_shift:%d n_lshift:%d\n", hi->n_val, hi->m_val, hi->m_shift, hi->n_lshift);
+       pr_info("UV: gpa_mask/shift:0x%lx/%d pnode_mask:0x%x apic_pns:%d\n", hi->gpa_mask, hi->gpa_shift, hi->pnode_mask, hi->apic_pnode_shift);
+       pr_info("UV: mmr_base/shift:0x%lx/%ld gru_base/shift:0x%lx/%ld\n", hi->global_mmr_base, hi->global_mmr_shift, hi->global_gru_base, hi->global_gru_shift);
+       pr_info("UV: gnode_upper:0x%lx gnode_extra:0x%x\n", hi->gnode_upper, hi->gnode_extra);
 }
 
 static void __init decode_gam_params(unsigned long ptr)
@@ -1139,12 +1153,9 @@ static void __init decode_gam_rng_tbl(unsigned long ptr)
        for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) {
                if (!index) {
                        pr_info("UV: GAM Range Table...\n");
-                       pr_info("UV:  # %20s %14s %5s %4s %5s %3s %2s\n",
-                               "Range", "", "Size", "Type", "NASID",
-                               "SID", "PN");
+                       pr_info("UV:  # %20s %14s %5s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN");
                }
-               pr_info(
-               "UV: %2d: 0x%014lx-0x%014lx %5luG %3d   %04x  %02x %02x\n",
+               pr_info("UV: %2d: 0x%014lx-0x%014lx %5luG %3d   %04x  %02x %02x\n",
                        index++,
                        (unsigned long)lgre << UV_GAM_RANGE_SHFT,
                        (unsigned long)gre->limit << UV_GAM_RANGE_SHFT,
@@ -1162,29 +1173,32 @@ static void __init decode_gam_rng_tbl(unsigned long ptr)
                if (pnode_max < gre->pnode)
                        pnode_max = gre->pnode;
        }
-       _min_socket = sock_min;
-       _max_socket = sock_max;
-       _min_pnode = pnode_min;
-       _max_pnode = pnode_max;
-       _gr_table_len = index;
-       pr_info(
-       "UV: GRT: %d entries, sockets(min:%x,max:%x) pnodes(min:%x,max:%x)\n",
-               index, _min_socket, _max_socket, _min_pnode, _max_pnode);
+       _min_socket     = sock_min;
+       _max_socket     = sock_max;
+       _min_pnode      = pnode_min;
+       _max_pnode      = pnode_max;
+       _gr_table_len   = index;
+
+       pr_info("UV: GRT: %d entries, sockets(min:%x,max:%x) pnodes(min:%x,max:%x)\n", index, _min_socket, _max_socket, _min_pnode, _max_pnode);
 }
 
-static void __init decode_uv_systab(void)
+static int __init decode_uv_systab(void)
 {
        struct uv_systab *st;
        int i;
 
+       if (uv_hub_info->hub_revision < UV4_HUB_REVISION_BASE)
+               return 0;       /* No extended UVsystab required */
+
        st = uv_systab;
-       if ((!st || st->revision < UV_SYSTAB_VERSION_UV4) && !is_uv4_hub())
-               return;
-       if (st->revision != UV_SYSTAB_VERSION_UV4_LATEST) {
-               pr_crit(
-               "UV: BIOS UVsystab version(%x) mismatch, expecting(%x)\n",
-                       st->revision, UV_SYSTAB_VERSION_UV4_LATEST);
-               BUG();
+       if ((!st) || (st->revision < UV_SYSTAB_VERSION_UV4_LATEST)) {
+               int rev = st ? st->revision : 0;
+
+               pr_err("UV: BIOS UVsystab version(%x) mismatch, expecting(%x)\n", rev, UV_SYSTAB_VERSION_UV4_LATEST);
+               pr_err("UV: Cannot support UV operations, switching to generic PC\n");
+               uv_system_type = UV_NONE;
+
+               return -EINVAL;
        }
 
        for (i = 0; st->entry[i].type != UV_SYSTAB_TYPE_UNUSED; i++) {
@@ -1205,10 +1219,11 @@ static void __init decode_uv_systab(void)
                        break;
                }
        }
+       return 0;
 }
 
 /*
- * Setup physical blade translations from UVH_NODE_PRESENT_TABLE
+ * Set up physical blade translations from UVH_NODE_PRESENT_TABLE
  * .. NB: UVH_NODE_PRESENT_TABLE is going away,
  * .. being replaced by GAM Range Table
  */
@@ -1244,14 +1259,13 @@ static void __init build_socket_tables(void)
        if (!gre) {
                if (is_uv1_hub() || is_uv2_hub() || is_uv3_hub()) {
                        pr_info("UV: No UVsystab socket table, ignoring\n");
-                       return;         /* not required */
+                       return;
                }
-               pr_crit(
-               "UV: Error: UVsystab address translations not available!\n");
+               pr_crit("UV: Error: UVsystab address translations not available!\n");
                BUG();
        }
 
-       /* build socket id -> node id, pnode */
+       /* Build socket id -> node id, pnode */
        num = maxsock - minsock + 1;
        bytes = num * sizeof(_socket_to_node[0]);
        _socket_to_node = kmalloc(bytes, GFP_KERNEL);
@@ -1268,27 +1282,27 @@ static void __init build_socket_tables(void)
        for (i = 0; i < nump; i++)
                _pnode_to_socket[i] = SOCK_EMPTY;
 
-       /* fill in pnode/node/addr conversion list values */
+       /* Fill in pnode/node/addr conversion list values: */
        pr_info("UV: GAM Building socket/pnode conversion tables\n");
        for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) {
                if (gre->type == UV_GAM_RANGE_TYPE_HOLE)
                        continue;
                i = gre->sockid - minsock;
+               /* Duplicate: */
                if (_socket_to_pnode[i] != SOCK_EMPTY)
-                       continue;       /* duplicate */
+                       continue;
                _socket_to_pnode[i] = gre->pnode;
 
                i = gre->pnode - minpnode;
                _pnode_to_socket[i] = gre->sockid;
 
-               pr_info(
-               "UV: sid:%02x type:%d nasid:%04x pn:%02x pn2s:%2x\n",
+               pr_info("UV: sid:%02x type:%d nasid:%04x pn:%02x pn2s:%2x\n",
                        gre->sockid, gre->type, gre->nasid,
                        _socket_to_pnode[gre->sockid - minsock],
                        _pnode_to_socket[gre->pnode - minpnode]);
        }
 
-       /* Set socket -> node values */
+       /* Set socket -> node values: */
        lnid = -1;
        for_each_present_cpu(cpu) {
                int nid = cpu_to_node(cpu);
@@ -1304,7 +1318,7 @@ static void __init build_socket_tables(void)
                        sockid, apicid, nid);
        }
 
-       /* Setup physical blade to pnode translation from GAM Range Table */
+       /* Set up physical blade to pnode translation from GAM Range Table: */
        bytes = num_possible_nodes() * sizeof(_node_to_pnode[0]);
        _node_to_pnode = kmalloc(bytes, GFP_KERNEL);
        BUG_ON(!_node_to_pnode);
@@ -1314,8 +1328,7 @@ static void __init build_socket_tables(void)
 
                for (sockid = minsock; sockid <= maxsock; sockid++) {
                        if (lnid == _socket_to_node[sockid - minsock]) {
-                               _node_to_pnode[lnid] =
-                                       _socket_to_pnode[sockid - minsock];
+                               _node_to_pnode[lnid] = _socket_to_pnode[sockid - minsock];
                                break;
                        }
                }
@@ -1332,8 +1345,7 @@ static void __init build_socket_tables(void)
        pr_info("UV: Checking socket->node/pnode for identity maps\n");
        if (minsock == 0) {
                for (i = 0; i < num; i++)
-                       if (_socket_to_node[i] == SOCK_EMPTY ||
-                               i != _socket_to_node[i])
+                       if (_socket_to_node[i] == SOCK_EMPTY || i != _socket_to_node[i])
                                break;
                if (i >= num) {
                        kfree(_socket_to_node);
@@ -1354,7 +1366,7 @@ static void __init build_socket_tables(void)
        }
 }
 
-void __init uv_system_init(void)
+static void __init uv_system_init_hub(void)
 {
        struct uv_hub_info_s hub_info = {0};
        int bytes, cpu, nodeid;
@@ -1372,8 +1384,13 @@ void __init uv_system_init(void)
 
        map_low_mmrs();
 
-       uv_bios_init();                 /* get uv_systab for decoding */
-       decode_uv_systab();
+       /* Get uv_systab for decoding: */
+       uv_bios_init();
+
+       /* If there's an UVsystab problem then abort UV init: */
+       if (decode_uv_systab() < 0)
+               return;
+
        build_socket_tables();
        build_uv_gr_table();
        uv_init_hub_info(&hub_info);
@@ -1381,14 +1398,10 @@ void __init uv_system_init(void)
        if (!_node_to_pnode)
                boot_init_possible_blades(&hub_info);
 
-       /* uv_num_possible_blades() is really the hub count */
-       pr_info("UV: Found %d hubs, %d nodes, %d cpus\n",
-                       uv_num_possible_blades(),
-                       num_possible_nodes(),
-                       num_possible_cpus());
+       /* uv_num_possible_blades() is really the hub count: */
+       pr_info("UV: Found %d hubs, %d nodes, %d CPUs\n", uv_num_possible_blades(), num_possible_nodes(), num_possible_cpus());
 
-       uv_bios_get_sn_info(0, &uv_type, &sn_partition_id, &sn_coherency_id,
-                           &sn_region_size, &system_serial_number);
+       uv_bios_get_sn_info(0, &uv_type, &sn_partition_id, &sn_coherency_id, &sn_region_size, &system_serial_number);
        hub_info.coherency_domain_number = sn_coherency_id;
        uv_rtc_init();
 
@@ -1401,33 +1414,31 @@ void __init uv_system_init(void)
                struct uv_hub_info_s *new_hub;
 
                if (__uv_hub_info_list[nodeid]) {
-                       pr_err("UV: Node %d UV HUB already initialized!?\n",
-                               nodeid);
+                       pr_err("UV: Node %d UV HUB already initialized!?\n", nodeid);
                        BUG();
                }
 
                /* Allocate new per hub info list */
-               new_hub = (nodeid == 0) ?
-                       &uv_hub_info_node0 :
-                       kzalloc_node(bytes, GFP_KERNEL, nodeid);
+               new_hub = (nodeid == 0) ?  &uv_hub_info_node0 : kzalloc_node(bytes, GFP_KERNEL, nodeid);
                BUG_ON(!new_hub);
                __uv_hub_info_list[nodeid] = new_hub;
                new_hub = uv_hub_info_list(nodeid);
                BUG_ON(!new_hub);
                *new_hub = hub_info;
 
-               /* Use information from GAM table if available */
+               /* Use information from GAM table if available: */
                if (_node_to_pnode)
                        new_hub->pnode = _node_to_pnode[nodeid];
-               else    /* Fill in during cpu loop */
+               else /* Or fill in during CPU loop: */
                        new_hub->pnode = 0xffff;
+
                new_hub->numa_blade_id = uv_node_to_blade_id(nodeid);
                new_hub->memory_nid = -1;
                new_hub->nr_possible_cpus = 0;
                new_hub->nr_online_cpus = 0;
        }
 
-       /* Initialize per cpu info */
+       /* Initialize per CPU info: */
        for_each_possible_cpu(cpu) {
                int apicid = per_cpu(x86_cpu_to_apicid, cpu);
                int numa_node_id;
@@ -1438,22 +1449,24 @@ void __init uv_system_init(void)
                pnode = uv_apicid_to_pnode(apicid);
 
                uv_cpu_info_per(cpu)->p_uv_hub_info = uv_hub_info_list(nodeid);
-               uv_cpu_info_per(cpu)->blade_cpu_id =
-                       uv_cpu_hub_info(cpu)->nr_possible_cpus++;
+               uv_cpu_info_per(cpu)->blade_cpu_id = uv_cpu_hub_info(cpu)->nr_possible_cpus++;
                if (uv_cpu_hub_info(cpu)->memory_nid == -1)
                        uv_cpu_hub_info(cpu)->memory_nid = cpu_to_node(cpu);
-               if (nodeid != numa_node_id &&   /* init memoryless node */
+
+               /* Init memoryless node: */
+               if (nodeid != numa_node_id &&
                    uv_hub_info_list(numa_node_id)->pnode == 0xffff)
                        uv_hub_info_list(numa_node_id)->pnode = pnode;
                else if (uv_cpu_hub_info(cpu)->pnode == 0xffff)
                        uv_cpu_hub_info(cpu)->pnode = pnode;
+
                uv_cpu_scir_info(cpu)->offset = uv_scir_offset(apicid);
        }
 
        for_each_node(nodeid) {
                unsigned short pnode = uv_hub_info_list(nodeid)->pnode;
 
-               /* Add pnode info for pre-GAM list nodes without cpus */
+               /* Add pnode info for pre-GAM list nodes without CPUs: */
                if (pnode == 0xffff) {
                        unsigned long paddr;
 
@@ -1479,15 +1492,30 @@ void __init uv_system_init(void)
        uv_scir_register_cpu_notifier();
        proc_mkdir("sgi_uv", NULL);
 
-       /* register Legacy VGA I/O redirection handler */
+       /* Register Legacy VGA I/O redirection handler: */
        pci_register_set_vga_state(uv_set_vga_state);
 
        /*
         * For a kdump kernel the reset must be BOOT_ACPI, not BOOT_EFI, as
-        * EFI is not enabled in the kdump kernel.
+        * EFI is not enabled in the kdump kernel:
         */
        if (is_kdump_kernel())
                reboot_type = BOOT_ACPI;
 }
 
+/*
+ * There is a small amount of UV specific code needed to initialize a
+ * UV system that does not have a "UV HUB" (referred to as "hubless").
+ */
+void __init uv_system_init(void)
+{
+       if (likely(!is_uv_system() && !is_uv_hubless()))
+               return;
+
+       if (is_uv_system())
+               uv_system_init_hub();
+       else
+               uv_nmi_setup_hubless();
+}
+
 apic_driver(apic_x2apic_uv_x);
index 45d44c173cf9e125df6f6b4bf9ae50d6ad19f393..4a7080c84a5a541edc4454c969c12c95543c198a 100644 (file)
@@ -905,8 +905,8 @@ static int apm_cpu_idle(struct cpuidle_device *dev,
 {
        static int use_apm_idle; /* = 0 */
        static unsigned int last_jiffies; /* = 0 */
-       static unsigned int last_stime; /* = 0 */
-       cputime_t stime, utime;
+       static u64 last_stime; /* = 0 */
+       u64 stime, utime;
 
        int apm_idle_done = 0;
        unsigned int jiffies_since_last_check = jiffies - last_jiffies;
@@ -919,7 +919,7 @@ recalc:
        } else if (jiffies_since_last_check > idle_period) {
                unsigned int idle_percentage;
 
-               idle_percentage = cputime_to_jiffies(stime - last_stime);
+               idle_percentage = nsecs_to_jiffies(stime - last_stime);
                idle_percentage *= 100;
                idle_percentage /= jiffies_since_last_check;
                use_apm_idle = (idle_percentage > idle_threshold);
index c62e015b126cf5e3f880ae8ea1c32a19f39d7044..de827d6ac8c2e8ad751eba8699cd1e0ada0e7f4b 100644 (file)
@@ -81,6 +81,7 @@ void common(void) {
 
        BLANK();
        OFFSET(BP_scratch, boot_params, scratch);
+       OFFSET(BP_secure_boot, boot_params, secure_boot);
        OFFSET(BP_loadflags, boot_params, hdr.loadflags);
        OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch);
        OFFSET(BP_version, boot_params, hdr.version);
index 1d3167269a6717902149171fe755123a5c654eb6..4e95b2e0d95fed354f23b09ea3491a7b7d4a35b9 100644 (file)
@@ -309,8 +309,22 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
 
        /* get information required for multi-node processors */
        if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
+               u32 eax, ebx, ecx, edx;
 
-               node_id = cpuid_ecx(0x8000001e) & 7;
+               cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
+
+               node_id  = ecx & 0xff;
+               smp_num_siblings = ((ebx >> 8) & 0xff) + 1;
+
+               if (c->x86 == 0x15)
+                       c->cu_id = ebx & 0xff;
+
+               if (c->x86 >= 0x17) {
+                       c->cpu_core_id = ebx & 0xff;
+
+                       if (smp_num_siblings > 1)
+                               c->x86_max_cores /= smp_num_siblings;
+               }
 
                /*
                 * We may have multiple LLCs if L3 caches exist, so check if we
@@ -541,8 +555,10 @@ static void early_init_amd(struct cpuinfo_x86 *c)
        if (c->x86_power & (1 << 8)) {
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
-               if (!check_tsc_unstable())
-                       set_sched_clock_stable();
+               if (check_tsc_unstable())
+                       clear_sched_clock_stable();
+       } else {
+               clear_sched_clock_stable();
        }
 
        /* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
index 1661d8ec92805191cd4581c365db68ea5acfdf02..2c234a6d94c4482e191b5feee3a12752f30d2c95 100644 (file)
@@ -1,5 +1,5 @@
-#include <linux/bitops.h>
-#include <linux/kernel.h>
+
+#include <linux/sched.h>
 
 #include <asm/cpufeature.h>
 #include <asm/e820.h>
@@ -104,6 +104,8 @@ static void early_init_centaur(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_64
        set_cpu_cap(c, X86_FEATURE_SYSENTER32);
 #endif
+
+       clear_sched_clock_stable();
 }
 
 static void init_centaur(struct cpuinfo_x86 *c)
index 9bab7a8a42936e32270e3573a17a1cd87fb580e0..f07005e6f4616f3b2504d59e8cbfff9efca1b127 100644 (file)
@@ -35,6 +35,7 @@
 #include <asm/desc.h>
 #include <asm/fpu/internal.h>
 #include <asm/mtrr.h>
+#include <asm/hwcap2.h>
 #include <linux/numa.h>
 #include <asm/asm.h>
 #include <asm/bugs.h>
@@ -51,6 +52,8 @@
 
 #include "cpu.h"
 
+u32 elf_hwcap2 __read_mostly;
+
 /* all of these masks are initialized in setup_cpu_local_masks() */
 cpumask_var_t cpu_initialized_mask;
 cpumask_var_t cpu_callout_mask;
@@ -83,6 +86,7 @@ static void default_init(struct cpuinfo_x86 *c)
                        strcpy(c->x86_model_id, "386");
        }
 #endif
+       clear_sched_clock_stable();
 }
 
 static const struct cpu_dev default_cpu = {
@@ -655,6 +659,16 @@ void cpu_detect(struct cpuinfo_x86 *c)
        }
 }
 
+static void apply_forced_caps(struct cpuinfo_x86 *c)
+{
+       int i;
+
+       for (i = 0; i < NCAPINTS; i++) {
+               c->x86_capability[i] &= ~cpu_caps_cleared[i];
+               c->x86_capability[i] |= cpu_caps_set[i];
+       }
+}
+
 void get_cpu_cap(struct cpuinfo_x86 *c)
 {
        u32 eax, ebx, ecx, edx;
@@ -748,6 +762,13 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
                c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
 
        init_scattered_cpuid_features(c);
+
+       /*
+        * Clear/Set all flags overridden by options, after probe.
+        * This needs to happen each time we re-probe, which may happen
+        * several times during CPU initialization.
+        */
+       apply_forced_caps(c);
 }
 
 static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
@@ -801,14 +822,12 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
        memset(&c->x86_capability, 0, sizeof c->x86_capability);
        c->extended_cpuid_level = 0;
 
-       if (!have_cpuid_p())
-               identify_cpu_without_cpuid(c);
-
        /* cyrix could have cpuid enabled via c_identify()*/
        if (have_cpuid_p()) {
                cpu_detect(c);
                get_cpu_vendor(c);
                get_cpu_cap(c);
+               setup_force_cpu_cap(X86_FEATURE_CPUID);
 
                if (this_cpu->c_early_init)
                        this_cpu->c_early_init(c);
@@ -818,6 +837,9 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 
                if (this_cpu->c_bsp_init)
                        this_cpu->c_bsp_init(c);
+       } else {
+               identify_cpu_without_cpuid(c);
+               setup_clear_cpu_cap(X86_FEATURE_CPUID);
        }
 
        setup_force_cpu_cap(X86_FEATURE_ALWAYS);
@@ -1015,6 +1037,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
        c->x86_model_id[0] = '\0';  /* Unset */
        c->x86_max_cores = 1;
        c->x86_coreid_bits = 0;
+       c->cu_id = 0xff;
 #ifdef CONFIG_X86_64
        c->x86_clflush_size = 64;
        c->x86_phys_bits = 36;
@@ -1034,10 +1057,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
                this_cpu->c_identify(c);
 
        /* Clear/Set all flags overridden by options, after probe */
-       for (i = 0; i < NCAPINTS; i++) {
-               c->x86_capability[i] &= ~cpu_caps_cleared[i];
-               c->x86_capability[i] |= cpu_caps_set[i];
-       }
+       apply_forced_caps(c);
 
 #ifdef CONFIG_X86_64
        c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
@@ -1055,6 +1075,8 @@ static void identify_cpu(struct cpuinfo_x86 *c)
         */
        if (this_cpu->c_init)
                this_cpu->c_init(c);
+       else
+               clear_sched_clock_stable();
 
        /* Disable the PN if appropriate */
        squash_the_stupid_serial_number(c);
@@ -1096,10 +1118,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
         * Clear/Set all flags overridden by options, need do it
         * before following smp all cpus cap AND.
         */
-       for (i = 0; i < NCAPINTS; i++) {
-               c->x86_capability[i] &= ~cpu_caps_cleared[i];
-               c->x86_capability[i] |= cpu_caps_set[i];
-       }
+       apply_forced_caps(c);
 
        /*
         * On SMP, boot_cpu_data holds the common feature set between
index bd9dcd6b712d0c09937facb3cdd41da866edae81..47416f959a48e304bba6a3d6ebbb3e978041c59a 100644 (file)
@@ -9,6 +9,7 @@
 #include <asm/pci-direct.h>
 #include <asm/tsc.h>
 #include <asm/cpufeature.h>
+#include <linux/sched.h>
 
 #include "cpu.h"
 
@@ -183,6 +184,7 @@ static void early_init_cyrix(struct cpuinfo_x86 *c)
                set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
                break;
        }
+       clear_sched_clock_stable();
 }
 
 static void init_cyrix(struct cpuinfo_x86 *c)
index 203f860d2ab3339c11ddc34d976f56ffa6ef069c..017ecd3bb5536ee5233b652a2e3c65dd8e4e8934 100644 (file)
@@ -15,6 +15,8 @@
 #include <asm/cpu.h>
 #include <asm/intel-family.h>
 #include <asm/microcode_intel.h>
+#include <asm/hwcap2.h>
+#include <asm/elf.h>
 
 #ifdef CONFIG_X86_64
 #include <linux/topology.h>
@@ -62,6 +64,46 @@ void check_mpx_erratum(struct cpuinfo_x86 *c)
        }
 }
 
+static bool ring3mwait_disabled __read_mostly;
+
+static int __init ring3mwait_disable(char *__unused)
+{
+       ring3mwait_disabled = true;
+       return 0;
+}
+__setup("ring3mwait=disable", ring3mwait_disable);
+
+static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
+{
+       /*
+        * Ring 3 MONITOR/MWAIT feature cannot be detected without
+        * cpu model and family comparison.
+        */
+       if (c->x86 != 6)
+               return;
+       switch (c->x86_model) {
+       case INTEL_FAM6_XEON_PHI_KNL:
+       case INTEL_FAM6_XEON_PHI_KNM:
+               break;
+       default:
+               return;
+       }
+
+       if (ring3mwait_disabled) {
+               msr_clear_bit(MSR_MISC_FEATURE_ENABLES,
+                             MSR_MISC_FEATURE_ENABLES_RING3MWAIT_BIT);
+               return;
+       }
+
+       msr_set_bit(MSR_MISC_FEATURE_ENABLES,
+                   MSR_MISC_FEATURE_ENABLES_RING3MWAIT_BIT);
+
+       set_cpu_cap(c, X86_FEATURE_RING3MWAIT);
+
+       if (c == &boot_cpu_data)
+               ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
+}
+
 static void early_init_intel(struct cpuinfo_x86 *c)
 {
        u64 misc_enable;
@@ -119,8 +161,10 @@ static void early_init_intel(struct cpuinfo_x86 *c)
        if (c->x86_power & (1 << 8)) {
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
-               if (!check_tsc_unstable())
-                       set_sched_clock_stable();
+               if (check_tsc_unstable())
+                       clear_sched_clock_stable();
+       } else {
+               clear_sched_clock_stable();
        }
 
        /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
@@ -560,6 +604,8 @@ static void init_intel(struct cpuinfo_x86 *c)
                detect_vmx_virtcap(c);
 
        init_intel_energy_perf(c);
+
+       probe_xeon_phi_r3mwait(c);
 }
 
 #ifdef CONFIG_X86_32
index 83f1a98d37dbc17608cbb72ffe3adc1c5b7e0548..2eee853796891460c5cb0dd20dc594697bdd6d1f 100644 (file)
@@ -52,8 +52,11 @@ void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err)
 
        if (severity >= GHES_SEV_RECOVERABLE)
                m.status |= MCI_STATUS_UC;
-       if (severity >= GHES_SEV_PANIC)
+
+       if (severity >= GHES_SEV_PANIC) {
                m.status |= MCI_STATUS_PCC;
+               m.tsc = rdtsc();
+       }
 
        m.addr = mem_err->physical_addr;
        mce_log(&m);
index 93d824ec3120ebfab0110f53a907e71a09c999dc..1e5a50c11d3c3546a59d286cbb84c402682cfb2b 100644 (file)
@@ -72,7 +72,7 @@ struct llist_node *mce_gen_pool_prepare_records(void)
        return new_head.first;
 }
 
-void mce_gen_pool_process(void)
+void mce_gen_pool_process(struct work_struct *__unused)
 {
        struct llist_node *head;
        struct mce_evt_llist *node, *tmp;
index 517619ea6498b41839f87f28bce4a76b1e43c7ab..99165b206df33ad80946ca9f9d4fdb1cf19be5e9 100644 (file)
@@ -152,7 +152,6 @@ static void raise_mce(struct mce *m)
        if (context == MCJ_CTX_RANDOM)
                return;
 
-#ifdef CONFIG_X86_LOCAL_APIC
        if (m->inject_flags & (MCJ_IRQ_BROADCAST | MCJ_NMI_BROADCAST)) {
                unsigned long start;
                int cpu;
@@ -192,9 +191,7 @@ static void raise_mce(struct mce *m)
                raise_local();
                put_cpu();
                put_online_cpus();
-       } else
-#endif
-       {
+       } else {
                preempt_disable();
                raise_local();
                preempt_enable();
index cd74a3f00aea8185c4e99eec72fb01bbaffb7306..903043e6a62b36a2c395c7aab52da6f85e34fc11 100644 (file)
@@ -31,7 +31,7 @@ struct mce_evt_llist {
        struct mce mce;
 };
 
-void mce_gen_pool_process(void);
+void mce_gen_pool_process(struct work_struct *__unused);
 bool mce_gen_pool_empty(void);
 int mce_gen_pool_add(struct mce *mce);
 int mce_gen_pool_init(void);
index 00ef43233e034b0cde9b2adc88b8003ddc42d00b..8e9725c607ea6acb7a91deed9b72b2c9a873803e 100644 (file)
@@ -128,7 +128,6 @@ void mce_setup(struct mce *m)
 {
        memset(m, 0, sizeof(struct mce));
        m->cpu = m->extcpu = smp_processor_id();
-       m->tsc = rdtsc();
        /* We hope get_seconds stays lockless */
        m->time = get_seconds();
        m->cpuvendor = boot_cpu_data.x86_vendor;
@@ -217,9 +216,7 @@ void mce_register_decode_chain(struct notifier_block *nb)
 {
        atomic_inc(&num_notifiers);
 
-       /* Ensure SRAO notifier has the highest priority in the decode chain. */
-       if (nb != &mce_srao_nb && nb->priority == INT_MAX)
-               nb->priority -= 1;
+       WARN_ON(nb->priority > MCE_PRIO_LOWEST && nb->priority < MCE_PRIO_EDAC);
 
        atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
 }
@@ -583,7 +580,7 @@ static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
 }
 static struct notifier_block mce_srao_nb = {
        .notifier_call  = srao_decode_notifier,
-       .priority = INT_MAX,
+       .priority       = MCE_PRIO_SRAO,
 };
 
 static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
@@ -609,7 +606,7 @@ static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
 static struct notifier_block mce_default_nb = {
        .notifier_call  = mce_default_notifier,
        /* lowest prio, we want it to run last. */
-       .priority       = 0,
+       .priority       = MCE_PRIO_LOWEST,
 };
 
 /*
@@ -710,14 +707,8 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 
        mce_gather_info(&m, NULL);
 
-       /*
-        * m.tsc was set in mce_setup(). Clear it if not requested.
-        *
-        * FIXME: Propagate @flags to mce_gather_info/mce_setup() to avoid
-        *        that dance.
-        */
-       if (!(flags & MCP_TIMESTAMP))
-               m.tsc = 0;
+       if (flags & MCP_TIMESTAMP)
+               m.tsc = rdtsc();
 
        for (i = 0; i < mca_cfg.banks; i++) {
                if (!mce_banks[i].ctl || !test_bit(i, *b))
@@ -1156,6 +1147,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
                goto out;
 
        mce_gather_info(&m, regs);
+       m.tsc = rdtsc();
 
        final = this_cpu_ptr(&mces_seen);
        *final = m;
@@ -1321,41 +1313,6 @@ int memory_failure(unsigned long pfn, int vector, int flags)
 }
 #endif
 
-/*
- * Action optional processing happens here (picking up
- * from the list of faulting pages that do_machine_check()
- * placed into the genpool).
- */
-static void mce_process_work(struct work_struct *dummy)
-{
-       mce_gen_pool_process();
-}
-
-#ifdef CONFIG_X86_MCE_INTEL
-/***
- * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
- * @cpu: The CPU on which the event occurred.
- * @status: Event status information
- *
- * This function should be called by the thermal interrupt after the
- * event has been processed and the decision was made to log the event
- * further.
- *
- * The status parameter will be saved to the 'status' field of 'struct mce'
- * and historically has been the register value of the
- * MSR_IA32_THERMAL_STATUS (Intel) msr.
- */
-void mce_log_therm_throt_event(__u64 status)
-{
-       struct mce m;
-
-       mce_setup(&m);
-       m.bank = MCE_THERMAL_BANK;
-       m.status = status;
-       mce_log(&m);
-}
-#endif /* CONFIG_X86_MCE_INTEL */
-
 /*
  * Periodic polling timer for "silent" machine check errors.  If the
  * poller finds an MCE, poll 2x faster.  When the poller finds no more
@@ -1373,20 +1330,15 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
 
 static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
 
-static void __restart_timer(struct timer_list *t, unsigned long interval)
+static void __start_timer(struct timer_list *t, unsigned long interval)
 {
        unsigned long when = jiffies + interval;
        unsigned long flags;
 
        local_irq_save(flags);
 
-       if (timer_pending(t)) {
-               if (time_before(when, t->expires))
-                       mod_timer(t, when);
-       } else {
-               t->expires = round_jiffies(when);
-               add_timer_on(t, smp_processor_id());
-       }
+       if (!timer_pending(t) || time_before(when, t->expires))
+               mod_timer(t, round_jiffies(when));
 
        local_irq_restore(flags);
 }
@@ -1421,7 +1373,7 @@ static void mce_timer_fn(unsigned long data)
 
 done:
        __this_cpu_write(mce_next_interval, iv);
-       __restart_timer(t, iv);
+       __start_timer(t, iv);
 }
 
 /*
@@ -1432,7 +1384,7 @@ void mce_timer_kick(unsigned long interval)
        struct timer_list *t = this_cpu_ptr(&mce_timer);
        unsigned long iv = __this_cpu_read(mce_next_interval);
 
-       __restart_timer(t, interval);
+       __start_timer(t, interval);
 
        if (interval < iv)
                __this_cpu_write(mce_next_interval, interval);
@@ -1779,17 +1731,15 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
        }
 }
 
-static void mce_start_timer(unsigned int cpu, struct timer_list *t)
+static void mce_start_timer(struct timer_list *t)
 {
        unsigned long iv = check_interval * HZ;
 
        if (mca_cfg.ignore_ce || !iv)
                return;
 
-       per_cpu(mce_next_interval, cpu) = iv;
-
-       t->expires = round_jiffies(jiffies + iv);
-       add_timer_on(t, cpu);
+       this_cpu_write(mce_next_interval, iv);
+       __start_timer(t, iv);
 }
 
 static void __mcheck_cpu_setup_timer(void)
@@ -1806,7 +1756,7 @@ static void __mcheck_cpu_init_timer(void)
        unsigned int cpu = smp_processor_id();
 
        setup_pinned_timer(t, mce_timer_fn, cpu);
-       mce_start_timer(cpu, t);
+       mce_start_timer(t);
 }
 
 /* Handle unconfigured int18 (should never happen) */
@@ -2196,7 +2146,7 @@ int __init mcheck_init(void)
        mce_register_decode_chain(&mce_default_nb);
        mcheck_vendor_init_severity();
 
-       INIT_WORK(&mce_work, mce_process_work);
+       INIT_WORK(&mce_work, mce_gen_pool_process);
        init_irq_work(&mce_irq_work, mce_irq_work_cb);
 
        return 0;
@@ -2566,7 +2516,7 @@ static int mce_cpu_dead(unsigned int cpu)
 
 static int mce_cpu_online(unsigned int cpu)
 {
-       struct timer_list *t = &per_cpu(mce_timer, cpu);
+       struct timer_list *t = this_cpu_ptr(&mce_timer);
        int ret;
 
        mce_device_create(cpu);
@@ -2577,13 +2527,13 @@ static int mce_cpu_online(unsigned int cpu)
                return ret;
        }
        mce_reenable_cpu();
-       mce_start_timer(cpu, t);
+       mce_start_timer(t);
        return 0;
 }
 
 static int mce_cpu_pre_down(unsigned int cpu)
 {
-       struct timer_list *t = &per_cpu(mce_timer, cpu);
+       struct timer_list *t = this_cpu_ptr(&mce_timer);
 
        mce_disable_cpu();
        del_timer_sync(t);
index a5fd137417a27278f49f3cf0b60803997176a665..9e5427df3243430a752e4f1425d906de45a458e7 100644 (file)
@@ -192,6 +192,7 @@ static void get_smca_bank_info(unsigned int bank)
 
                        smca_banks[bank].hwid = s_hwid;
                        smca_banks[bank].id = instance_id;
+                       smca_banks[bank].sysfs_id = s_hwid->count++;
                        break;
                }
        }
@@ -777,7 +778,8 @@ __log_error(unsigned int bank, bool deferred_err, bool threshold_err, u64 misc)
        mce_setup(&m);
 
        m.status = status;
-       m.bank = bank;
+       m.bank   = bank;
+       m.tsc    = rdtsc();
 
        if (threshold_err)
                m.misc = misc;
@@ -1064,9 +1066,12 @@ static const char *get_name(unsigned int bank, struct threshold_block *b)
                return NULL;
        }
 
+       if (smca_banks[bank].hwid->count == 1)
+               return smca_get_name(bank_type);
+
        snprintf(buf_mcatype, MAX_MCATYPE_NAME_LEN,
                 "%s_%x", smca_get_name(bank_type),
-                         smca_banks[bank].id);
+                         smca_banks[bank].sysfs_id);
        return buf_mcatype;
 }
 
index 465aca8be009ff21b9863191a5b248d39ec642c7..85469f84c9214027aab98273ac952a94291173b1 100644 (file)
@@ -6,7 +6,7 @@
  *
  * Maintains a counter in /sys that keeps track of the number of thermal
  * events, such that the user knows how bad the thermal problem might be
- * (since the logging to syslog and mcelog is rate limited).
+ * (since the logging to syslog is rate limited).
  *
  * Author: Dmitriy Zavin (dmitriyz@google.com)
  *
@@ -141,13 +141,8 @@ static struct attribute_group thermal_attr_group = {
  * IRQ has been acknowledged.
  *
  * It will take care of rate limiting and printing messages to the syslog.
- *
- * Returns: 0 : Event should NOT be further logged, i.e. still in
- *              "timeout" from previous log message.
- *          1 : Event should be logged further, and a message has been
- *              printed to the syslog.
  */
-static int therm_throt_process(bool new_event, int event, int level)
+static void therm_throt_process(bool new_event, int event, int level)
 {
        struct _thermal_state *state;
        unsigned int this_cpu = smp_processor_id();
@@ -162,16 +157,16 @@ static int therm_throt_process(bool new_event, int event, int level)
                else if (event == POWER_LIMIT_EVENT)
                        state = &pstate->core_power_limit;
                else
-                        return 0;
+                       return;
        } else if (level == PACKAGE_LEVEL) {
                if (event == THERMAL_THROTTLING_EVENT)
                        state = &pstate->package_throttle;
                else if (event == POWER_LIMIT_EVENT)
                        state = &pstate->package_power_limit;
                else
-                       return 0;
+                       return;
        } else
-               return 0;
+               return;
 
        old_event = state->new_event;
        state->new_event = new_event;
@@ -181,7 +176,7 @@ static int therm_throt_process(bool new_event, int event, int level)
 
        if (time_before64(now, state->next_check) &&
                        state->count != state->last_count)
-               return 0;
+               return;
 
        state->next_check = now + CHECK_INTERVAL;
        state->last_count = state->count;
@@ -193,16 +188,14 @@ static int therm_throt_process(bool new_event, int event, int level)
                                this_cpu,
                                level == CORE_LEVEL ? "Core" : "Package",
                                state->count);
-               return 1;
+               return;
        }
        if (old_event) {
                if (event == THERMAL_THROTTLING_EVENT)
                        pr_info("CPU%d: %s temperature/speed normal\n", this_cpu,
                                level == CORE_LEVEL ? "Core" : "Package");
-               return 1;
+               return;
        }
-
-       return 0;
 }
 
 static int thresh_event_valid(int level, int event)
@@ -365,10 +358,9 @@ static void intel_thermal_interrupt(void)
        /* Check for violation of core thermal thresholds*/
        notify_thresholds(msr_val);
 
-       if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT,
-                               THERMAL_THROTTLING_EVENT,
-                               CORE_LEVEL) != 0)
-               mce_log_therm_throt_event(msr_val);
+       therm_throt_process(msr_val & THERM_STATUS_PROCHOT,
+                           THERMAL_THROTTLING_EVENT,
+                           CORE_LEVEL);
 
        if (this_cpu_has(X86_FEATURE_PLN) && int_pln_enable)
                therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT,
index 6a31e2691f3aa0ac68620459c371ca42912c4475..7889ae492af020ee2ab546f19b9fe4b509b09d3a 100644 (file)
@@ -42,16 +42,19 @@ static struct equiv_cpu_entry *equiv_cpu_table;
 
 /*
  * This points to the current valid container of microcode patches which we will
- * save from the initrd/builtin before jettisoning its contents.
+ * save from the initrd/builtin before jettisoning its contents. @mc is the
+ * microcode patch we found to match.
  */
-struct container {
-       u8 *data;
-       size_t size;
-} cont;
+struct cont_desc {
+       struct microcode_amd *mc;
+       u32                  cpuid_1_eax;
+       u32                  psize;
+       u8                   *data;
+       size_t               size;
+};
 
 static u32 ucode_new_rev;
 static u8 amd_ucode_patch[PATCH_MAX_SIZE];
-static u16 this_equiv_id;
 
 /*
  * Microcode patch container file is prepended to the initrd in cpio
@@ -60,57 +63,13 @@ static u16 this_equiv_id;
 static const char
 ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";
 
-static size_t compute_container_size(u8 *data, u32 total_size)
+static u16 find_equiv_id(struct equiv_cpu_entry *equiv_table, u32 sig)
 {
-       size_t size = 0;
-       u32 *header = (u32 *)data;
-
-       if (header[0] != UCODE_MAGIC ||
-           header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
-           header[2] == 0)                            /* size */
-               return size;
-
-       size = header[2] + CONTAINER_HDR_SZ;
-       total_size -= size;
-       data += size;
-
-       while (total_size) {
-               u16 patch_size;
-
-               header = (u32 *)data;
-
-               if (header[0] != UCODE_UCODE_TYPE)
-                       break;
-
-               /*
-                * Sanity-check patch size.
-                */
-               patch_size = header[1];
-               if (patch_size > PATCH_MAX_SIZE)
-                       break;
-
-               size       += patch_size + SECTION_HDR_SIZE;
-               data       += patch_size + SECTION_HDR_SIZE;
-               total_size -= patch_size + SECTION_HDR_SIZE;
+       for (; equiv_table && equiv_table->installed_cpu; equiv_table++) {
+               if (sig == equiv_table->installed_cpu)
+                       return equiv_table->equiv_cpu;
        }
 
-       return size;
-}
-
-static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table,
-                               unsigned int sig)
-{
-       int i = 0;
-
-       if (!equiv_cpu_table)
-               return 0;
-
-       while (equiv_cpu_table[i].installed_cpu != 0) {
-               if (sig == equiv_cpu_table[i].installed_cpu)
-                       return equiv_cpu_table[i].equiv_cpu;
-
-               i++;
-       }
        return 0;
 }
 
@@ -118,91 +77,109 @@ static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table,
  * This scans the ucode blob for the proper container as we can have multiple
  * containers glued together. Returns the equivalence ID from the equivalence
  * table or 0 if none found.
+ * Returns the amount of bytes consumed while scanning. @desc contains all the
+ * data we're going to use in later stages of the application.
  */
-static u16
-find_proper_container(u8 *ucode, size_t size, struct container *ret_cont)
+static ssize_t parse_container(u8 *ucode, ssize_t size, struct cont_desc *desc)
 {
-       struct container ret = { NULL, 0 };
-       u32 eax, ebx, ecx, edx;
        struct equiv_cpu_entry *eq;
-       int offset, left;
-       u16 eq_id = 0;
-       u32 *header;
-       u8 *data;
+       ssize_t orig_size = size;
+       u32 *hdr = (u32 *)ucode;
+       u16 eq_id;
+       u8 *buf;
 
-       data   = ucode;
-       left   = size;
-       header = (u32 *)data;
+       /* Am I looking at an equivalence table header? */
+       if (hdr[0] != UCODE_MAGIC ||
+           hdr[1] != UCODE_EQUIV_CPU_TABLE_TYPE ||
+           hdr[2] == 0)
+               return CONTAINER_HDR_SZ;
 
+       buf = ucode;
 
-       /* find equiv cpu table */
-       if (header[0] != UCODE_MAGIC ||
-           header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
-           header[2] == 0)                            /* size */
-               return eq_id;
+       eq = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ);
 
-       eax = 0x00000001;
-       ecx = 0;
-       native_cpuid(&eax, &ebx, &ecx, &edx);
+       /* Find the equivalence ID of our CPU in this table: */
+       eq_id = find_equiv_id(eq, desc->cpuid_1_eax);
 
-       while (left > 0) {
-               eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ);
+       buf  += hdr[2] + CONTAINER_HDR_SZ;
+       size -= hdr[2] + CONTAINER_HDR_SZ;
+
+       /*
+        * Scan through the rest of the container to find where it ends. We do
+        * some basic sanity-checking too.
+        */
+       while (size > 0) {
+               struct microcode_amd *mc;
+               u32 patch_size;
 
-               ret.data = data;
+               hdr = (u32 *)buf;
 
-               /* Advance past the container header */
-               offset = header[2] + CONTAINER_HDR_SZ;
-               data  += offset;
-               left  -= offset;
+               if (hdr[0] != UCODE_UCODE_TYPE)
+                       break;
 
-               eq_id = find_equiv_id(eq, eax);
-               if (eq_id) {
-                       ret.size = compute_container_size(ret.data, left + offset);
+               /* Sanity-check patch size. */
+               patch_size = hdr[1];
+               if (patch_size > PATCH_MAX_SIZE)
+                       break;
 
-                       /*
-                        * truncate how much we need to iterate over in the
-                        * ucode update loop below
-                        */
-                       left = ret.size - offset;
+               /* Skip patch section header: */
+               buf  += SECTION_HDR_SIZE;
+               size -= SECTION_HDR_SIZE;
 
-                       *ret_cont = ret;
-                       return eq_id;
+               mc = (struct microcode_amd *)buf;
+               if (eq_id == mc->hdr.processor_rev_id) {
+                       desc->psize = patch_size;
+                       desc->mc = mc;
                }
 
-               /*
-                * support multiple container files appended together. if this
-                * one does not have a matching equivalent cpu entry, we fast
-                * forward to the next container file.
-                */
-               while (left > 0) {
-                       header = (u32 *)data;
-
-                       if (header[0] == UCODE_MAGIC &&
-                           header[1] == UCODE_EQUIV_CPU_TABLE_TYPE)
-                               break;
-
-                       offset = header[1] + SECTION_HDR_SIZE;
-                       data  += offset;
-                       left  -= offset;
-               }
+               buf  += patch_size;
+               size -= patch_size;
+       }
 
-               /* mark where the next microcode container file starts */
-               offset    = data - (u8 *)ucode;
-               ucode     = data;
+       /*
+        * If we have found a patch (desc->mc), it means we're looking at the
+        * container which has a patch for this CPU so return 0 to mean, @ucode
+        * already points to the proper container. Otherwise, we return the size
+        * we scanned so that we can advance to the next container in the
+        * buffer.
+        */
+       if (desc->mc) {
+               desc->data = ucode;
+               desc->size = orig_size - size;
+
+               return 0;
        }
 
-       return eq_id;
+       return orig_size - size;
 }
 
-static int __apply_microcode_amd(struct microcode_amd *mc_amd)
+/*
+ * Scan the ucode blob for the proper container as we can have multiple
+ * containers glued together.
+ */
+static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
+{
+       ssize_t rem = size;
+
+       while (rem >= 0) {
+               ssize_t s = parse_container(ucode, rem, desc);
+               if (!s)
+                       return;
+
+               ucode += s;
+               rem   -= s;
+       }
+}
+
+static int __apply_microcode_amd(struct microcode_amd *mc)
 {
        u32 rev, dummy;
 
-       native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code);
+       native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc->hdr.data_code);
 
        /* verify patch application was successful */
        native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
-       if (rev != mc_amd->hdr.patch_id)
+       if (rev != mc->hdr.patch_id)
                return -1;
 
        return 0;
@@ -217,17 +194,16 @@ static int __apply_microcode_amd(struct microcode_amd *mc_amd)
  * load_microcode_amd() to save equivalent cpu table and microcode patches in
  * kernel heap memory.
  *
- * Returns true if container found (sets @ret_cont), false otherwise.
+ * Returns true if container found (sets @desc), false otherwise.
  */
-static bool apply_microcode_early_amd(void *ucode, size_t size, bool save_patch,
-                                     struct container *ret_cont)
+static bool
+apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_patch)
 {
+       struct cont_desc desc = { 0 };
        u8 (*patch)[PATCH_MAX_SIZE];
-       u32 rev, *header, *new_rev;
-       struct container ret;
-       int offset, left;
-       u16 eq_id = 0;
-       u8  *data;
+       struct microcode_amd *mc;
+       u32 rev, dummy, *new_rev;
+       bool ret = false;
 
 #ifdef CONFIG_X86_32
        new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
@@ -237,50 +213,27 @@ static bool apply_microcode_early_amd(void *ucode, size_t size, bool save_patch,
        patch   = &amd_ucode_patch;
 #endif
 
-       if (check_current_patch_level(&rev, true))
-               return false;
-
-       eq_id = find_proper_container(ucode, size, &ret);
-       if (!eq_id)
-               return false;
-
-       this_equiv_id = eq_id;
-       header = (u32 *)ret.data;
-
-       /* We're pointing to an equiv table, skip over it. */
-       data = ret.data +  header[2] + CONTAINER_HDR_SZ;
-       left = ret.size - (header[2] + CONTAINER_HDR_SZ);
-
-       while (left > 0) {
-               struct microcode_amd *mc;
-
-               header = (u32 *)data;
-               if (header[0] != UCODE_UCODE_TYPE || /* type */
-                   header[1] == 0)                  /* size */
-                       break;
+       desc.cpuid_1_eax = cpuid_1_eax;
 
-               mc = (struct microcode_amd *)(data + SECTION_HDR_SIZE);
+       scan_containers(ucode, size, &desc);
 
-               if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id) {
+       mc = desc.mc;
+       if (!mc)
+               return ret;
 
-                       if (!__apply_microcode_amd(mc)) {
-                               rev = mc->hdr.patch_id;
-                               *new_rev = rev;
+       native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+       if (rev >= mc->hdr.patch_id)
+               return ret;
 
-                               if (save_patch)
-                                       memcpy(patch, mc, min_t(u32, header[1], PATCH_MAX_SIZE));
-                       }
-               }
+       if (!__apply_microcode_amd(mc)) {
+               *new_rev = mc->hdr.patch_id;
+               ret      = true;
 
-               offset  = header[1] + SECTION_HDR_SIZE;
-               data   += offset;
-               left   -= offset;
+               if (save_patch)
+                       memcpy(patch, mc, min_t(u32, desc.psize, PATCH_MAX_SIZE));
        }
 
-       if (ret_cont)
-               *ret_cont = ret;
-
-       return true;
+       return ret;
 }
 
 static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
@@ -298,10 +251,9 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
 #endif
 }
 
-void __init load_ucode_amd_bsp(unsigned int family)
+void __load_ucode_amd(unsigned int cpuid_1_eax, struct cpio_data *ret)
 {
        struct ucode_cpu_info *uci;
-       u32 eax, ebx, ecx, edx;
        struct cpio_data cp;
        const char *path;
        bool use_pa;
@@ -316,183 +268,95 @@ void __init load_ucode_amd_bsp(unsigned int family)
                use_pa  = false;
        }
 
-       if (!get_builtin_microcode(&cp, family))
+       if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
                cp = find_microcode_in_initrd(path, use_pa);
 
-       if (!(cp.data && cp.size))
-               return;
-
-       /* Get BSP's CPUID.EAX(1), needed in load_microcode_amd() */
-       eax = 1;
-       ecx = 0;
-       native_cpuid(&eax, &ebx, &ecx, &edx);
-       uci->cpu_sig.sig = eax;
+       /* Needed in load_microcode_amd() */
+       uci->cpu_sig.sig = cpuid_1_eax;
 
-       apply_microcode_early_amd(cp.data, cp.size, true, NULL);
+       *ret = cp;
 }
 
-#ifdef CONFIG_X86_32
-/*
- * On 32-bit, since AP's early load occurs before paging is turned on, we
- * cannot traverse cpu_equiv_table and microcode_cache in kernel heap memory.
- * So during cold boot, AP will apply_ucode_in_initrd() just like the BSP.
- * In save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch,
- * which is used upon resume from suspend.
- */
-void load_ucode_amd_ap(unsigned int family)
+void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
 {
-       struct microcode_amd *mc;
-       struct cpio_data cp;
-
-       mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
-       if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
-               __apply_microcode_amd(mc);
-               return;
-       }
-
-       if (!get_builtin_microcode(&cp, family))
-               cp = find_microcode_in_initrd((const char *)__pa_nodebug(ucode_path), true);
+       struct cpio_data cp = { };
 
+       __load_ucode_amd(cpuid_1_eax, &cp);
        if (!(cp.data && cp.size))
                return;
 
-       /*
-        * This would set amd_ucode_patch above so that the following APs can
-        * use it directly instead of going down this path again.
-        */
-       apply_microcode_early_amd(cp.data, cp.size, true, NULL);
+       apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true);
 }
-#else
-void load_ucode_amd_ap(unsigned int family)
+
+void load_ucode_amd_ap(unsigned int cpuid_1_eax)
 {
-       struct equiv_cpu_entry *eq;
        struct microcode_amd *mc;
-       u32 rev, eax;
-       u16 eq_id;
-
-       /* 64-bit runs with paging enabled, thus early==false. */
-       if (check_current_patch_level(&rev, false))
-               return;
-
-       /* First AP hasn't cached it yet, go through the blob. */
-       if (!cont.data) {
-               struct cpio_data cp = { NULL, 0, "" };
+       struct cpio_data cp;
+       u32 *new_rev, rev, dummy;
 
-               if (cont.size == -1)
-                       return;
+       if (IS_ENABLED(CONFIG_X86_32)) {
+               mc      = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
+               new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
+       } else {
+               mc      = (struct microcode_amd *)amd_ucode_patch;
+               new_rev = &ucode_new_rev;
+       }
 
-reget:
-               if (!get_builtin_microcode(&cp, family)) {
-#ifdef CONFIG_BLK_DEV_INITRD
-                       cp = find_cpio_data(ucode_path, (void *)initrd_start,
-                                           initrd_end - initrd_start, NULL);
-#endif
-                       if (!(cp.data && cp.size)) {
-                               /*
-                                * Mark it so that other APs do not scan again
-                                * for no real reason and slow down boot
-                                * needlessly.
-                                */
-                               cont.size = -1;
-                               return;
-                       }
-               }
+       native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
 
-               if (!apply_microcode_early_amd(cp.data, cp.size, false, &cont)) {
-                       cont.size = -1;
+       /* Check whether we have saved a new patch already: */
+       if (*new_rev && rev < mc->hdr.patch_id) {
+               if (!__apply_microcode_amd(mc)) {
+                       *new_rev = mc->hdr.patch_id;
                        return;
                }
        }
 
-       eax = cpuid_eax(0x00000001);
-       eq  = (struct equiv_cpu_entry *)(cont.data + CONTAINER_HDR_SZ);
-
-       eq_id = find_equiv_id(eq, eax);
-       if (!eq_id)
+       __load_ucode_amd(cpuid_1_eax, &cp);
+       if (!(cp.data && cp.size))
                return;
 
-       if (eq_id == this_equiv_id) {
-               mc = (struct microcode_amd *)amd_ucode_patch;
-
-               if (mc && rev < mc->hdr.patch_id) {
-                       if (!__apply_microcode_amd(mc))
-                               ucode_new_rev = mc->hdr.patch_id;
-               }
-
-       } else {
-
-               /*
-                * AP has a different equivalence ID than BSP, looks like
-                * mixed-steppings silicon so go through the ucode blob anew.
-                */
-               goto reget;
-       }
+       apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false);
 }
-#endif /* CONFIG_X86_32 */
 
 static enum ucode_state
 load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);
 
-int __init save_microcode_in_initrd_amd(unsigned int fam)
+int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
 {
+       struct cont_desc desc = { 0 };
        enum ucode_state ret;
-       int retval = 0;
-       u16 eq_id;
-
-       if (!cont.data) {
-               if (IS_ENABLED(CONFIG_X86_32) && (cont.size != -1)) {
-                       struct cpio_data cp = { NULL, 0, "" };
-
-#ifdef CONFIG_BLK_DEV_INITRD
-                       cp = find_cpio_data(ucode_path, (void *)initrd_start,
-                                           initrd_end - initrd_start, NULL);
-#endif
+       struct cpio_data cp;
 
-                       if (!(cp.data && cp.size)) {
-                               cont.size = -1;
-                               return -EINVAL;
-                       }
+       cp = find_microcode_in_initrd(ucode_path, false);
+       if (!(cp.data && cp.size))
+               return -EINVAL;
 
-                       eq_id = find_proper_container(cp.data, cp.size, &cont);
-                       if (!eq_id) {
-                               cont.size = -1;
-                               return -EINVAL;
-                       }
+       desc.cpuid_1_eax = cpuid_1_eax;
 
-               } else
-                       return -EINVAL;
-       }
+       scan_containers(cp.data, cp.size, &desc);
+       if (!desc.mc)
+               return -EINVAL;
 
-       ret = load_microcode_amd(smp_processor_id(), fam, cont.data, cont.size);
+       ret = load_microcode_amd(smp_processor_id(), x86_family(cpuid_1_eax),
+                                desc.data, desc.size);
        if (ret != UCODE_OK)
-               retval = -EINVAL;
-
-       /*
-        * This will be freed any msec now, stash patches for the current
-        * family and switch to patch cache for cpu hotplug, etc later.
-        */
-       cont.data = NULL;
-       cont.size = 0;
+               return -EINVAL;
 
-       return retval;
+       return 0;
 }
 
 void reload_ucode_amd(void)
 {
        struct microcode_amd *mc;
-       u32 rev;
-
-       /*
-        * early==false because this is a syscore ->resume path and by
-        * that time paging is long enabled.
-        */
-       if (check_current_patch_level(&rev, false))
-               return;
+       u32 rev, dummy;
 
        mc = (struct microcode_amd *)amd_ucode_patch;
        if (!mc)
                return;
 
+       rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+
        if (rev < mc->hdr.patch_id) {
                if (!__apply_microcode_amd(mc)) {
                        ucode_new_rev = mc->hdr.patch_id;
@@ -630,60 +494,13 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
        return patch_size;
 }
 
-/*
- * Those patch levels cannot be updated to newer ones and thus should be final.
- */
-static u32 final_levels[] = {
-       0x01000098,
-       0x0100009f,
-       0x010000af,
-       0, /* T-101 terminator */
-};
-
-/*
- * Check the current patch level on this CPU.
- *
- * @rev: Use it to return the patch level. It is set to 0 in the case of
- * error.
- *
- * Returns:
- *  - true: if update should stop
- *  - false: otherwise
- */
-bool check_current_patch_level(u32 *rev, bool early)
-{
-       u32 lvl, dummy, i;
-       bool ret = false;
-       u32 *levels;
-
-       native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);
-
-       if (IS_ENABLED(CONFIG_X86_32) && early)
-               levels = (u32 *)__pa_nodebug(&final_levels);
-       else
-               levels = final_levels;
-
-       for (i = 0; levels[i]; i++) {
-               if (lvl == levels[i]) {
-                       lvl = 0;
-                       ret = true;
-                       break;
-               }
-       }
-
-       if (rev)
-               *rev = lvl;
-
-       return ret;
-}
-
 static int apply_microcode_amd(int cpu)
 {
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        struct microcode_amd *mc_amd;
        struct ucode_cpu_info *uci;
        struct ucode_patch *p;
-       u32 rev;
+       u32 rev, dummy;
 
        BUG_ON(raw_smp_processor_id() != cpu);
 
@@ -696,8 +513,7 @@ static int apply_microcode_amd(int cpu)
        mc_amd  = p->data;
        uci->mc = p->data;
 
-       if (check_current_patch_level(&rev, false))
-               return -1;
+       rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
 
        /* need to apply patch? */
        if (rev >= mc_amd->hdr.patch_id) {
index 2af69d27da629a5c802498e692300f9980862a2a..b4a4cd39b35829c73c20be28cda3bb58201d0f55 100644 (file)
@@ -46,6 +46,8 @@
 static struct microcode_ops    *microcode_ops;
 static bool dis_ucode_ldr = true;
 
+bool initrd_gone;
+
 LIST_HEAD(microcode_cache);
 
 /*
@@ -64,19 +66,50 @@ static DEFINE_MUTEX(microcode_mutex);
 
 struct ucode_cpu_info          ucode_cpu_info[NR_CPUS];
 
-/*
- * Operations that are run on a target cpu:
- */
-
 struct cpu_info_ctx {
        struct cpu_signature    *cpu_sig;
        int                     err;
 };
 
+/*
+ * Those patch levels cannot be updated to newer ones and thus should be final.
+ */
+static u32 final_levels[] = {
+       0x01000098,
+       0x0100009f,
+       0x010000af,
+       0, /* T-101 terminator */
+};
+
+/*
+ * Check the current patch level on this CPU.
+ *
+ * Returns:
+ *  - true: if update should stop
+ *  - false: otherwise
+ */
+static bool amd_check_current_patch_level(void)
+{
+       u32 lvl, dummy, i;
+       u32 *levels;
+
+       native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);
+
+       if (IS_ENABLED(CONFIG_X86_32))
+               levels = (u32 *)__pa_nodebug(&final_levels);
+       else
+               levels = final_levels;
+
+       for (i = 0; levels[i]; i++) {
+               if (lvl == levels[i])
+                       return true;
+       }
+       return false;
+}
+
 static bool __init check_loader_disabled_bsp(void)
 {
        static const char *__dis_opt_str = "dis_ucode_ldr";
-       u32 a, b, c, d;
 
 #ifdef CONFIG_X86_32
        const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
@@ -92,18 +125,19 @@ static bool __init check_loader_disabled_bsp(void)
        if (!have_cpuid_p())
                return *res;
 
-       a = 1;
-       c = 0;
-       native_cpuid(&a, &b, &c, &d);
-
        /*
         * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
         * completely accurate as xen pv guests don't see that CPUID bit set but
         * that's good enough as they don't land on the BSP path anyway.
         */
-       if (c & BIT(31))
+       if (native_cpuid_ecx(1) & BIT(31))
                return *res;
 
+       if (x86_cpuid_vendor() == X86_VENDOR_AMD) {
+               if (amd_check_current_patch_level())
+                       return *res;
+       }
+
        if (cmdline_find_option_bool(cmdline, option) <= 0)
                *res = false;
 
@@ -131,23 +165,21 @@ bool get_builtin_firmware(struct cpio_data *cd, const char *name)
 
 void __init load_ucode_bsp(void)
 {
-       int vendor;
-       unsigned int family;
+       unsigned int cpuid_1_eax;
 
        if (check_loader_disabled_bsp())
                return;
 
-       vendor = x86_cpuid_vendor();
-       family = x86_cpuid_family();
+       cpuid_1_eax = native_cpuid_eax(1);
 
-       switch (vendor) {
+       switch (x86_cpuid_vendor()) {
        case X86_VENDOR_INTEL:
-               if (family >= 6)
+               if (x86_family(cpuid_1_eax) >= 6)
                        load_ucode_intel_bsp();
                break;
        case X86_VENDOR_AMD:
-               if (family >= 0x10)
-                       load_ucode_amd_bsp(family);
+               if (x86_family(cpuid_1_eax) >= 0x10)
+                       load_ucode_amd_bsp(cpuid_1_eax);
                break;
        default:
                break;
@@ -165,22 +197,21 @@ static bool check_loader_disabled_ap(void)
 
 void load_ucode_ap(void)
 {
-       int vendor, family;
+       unsigned int cpuid_1_eax;
 
        if (check_loader_disabled_ap())
                return;
 
-       vendor = x86_cpuid_vendor();
-       family = x86_cpuid_family();
+       cpuid_1_eax = native_cpuid_eax(1);
 
-       switch (vendor) {
+       switch (x86_cpuid_vendor()) {
        case X86_VENDOR_INTEL:
-               if (family >= 6)
+               if (x86_family(cpuid_1_eax) >= 6)
                        load_ucode_intel_ap();
                break;
        case X86_VENDOR_AMD:
-               if (family >= 0x10)
-                       load_ucode_amd_ap(family);
+               if (x86_family(cpuid_1_eax) >= 0x10)
+                       load_ucode_amd_ap(cpuid_1_eax);
                break;
        default:
                break;
@@ -190,21 +221,24 @@ void load_ucode_ap(void)
 static int __init save_microcode_in_initrd(void)
 {
        struct cpuinfo_x86 *c = &boot_cpu_data;
+       int ret = -EINVAL;
 
        switch (c->x86_vendor) {
        case X86_VENDOR_INTEL:
                if (c->x86 >= 6)
-                       return save_microcode_in_initrd_intel();
+                       ret = save_microcode_in_initrd_intel();
                break;
        case X86_VENDOR_AMD:
                if (c->x86 >= 0x10)
-                       return save_microcode_in_initrd_amd(c->x86);
+                       return save_microcode_in_initrd_amd(cpuid_eax(1));
                break;
        default:
                break;
        }
 
-       return -EINVAL;
+       initrd_gone = true;
+
+       return ret;
 }
 
 struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
@@ -247,9 +281,16 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
         * has the virtual address of the beginning of the initrd. It also
         * possibly relocates the ramdisk. In either case, initrd_start contains
         * the updated address so use that instead.
+        *
+        * initrd_gone is for the hotplug case where we've thrown out initrd
+        * already.
         */
-       if (!use_pa && initrd_start)
-               start = initrd_start;
+       if (!use_pa) {
+               if (initrd_gone)
+                       return (struct cpio_data){ NULL, 0, "" };
+               if (initrd_start)
+                       start = initrd_start;
+       }
 
        return find_cpio_data(path, (void *)start, size, NULL);
 #else /* !CONFIG_BLK_DEV_INITRD */
index 3f329b74e040c23b6b85dfd12a85f80d630c63ac..8325d8a09ab0768dd08156b8e4c5b755b78c10f9 100644 (file)
@@ -41,7 +41,7 @@
 
 static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
 
-/* Current microcode patch used in early patching */
+/* Current microcode patch used in early patching on the APs. */
 struct microcode_intel *intel_ucode_patch;
 
 static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
@@ -607,12 +607,6 @@ int __init save_microcode_in_initrd_intel(void)
        struct ucode_cpu_info uci;
        struct cpio_data cp;
 
-       /*
-        * AP loading didn't find any microcode patch, no need to save anything.
-        */
-       if (!intel_ucode_patch || IS_ERR(intel_ucode_patch))
-               return 0;
-
        if (!load_builtin_intel_microcode(&cp))
                cp = find_microcode_in_initrd(ucode_path, false);
 
@@ -628,7 +622,6 @@ int __init save_microcode_in_initrd_intel(void)
        return 0;
 }
 
-
 /*
  * @res_patch, output: a pointer to the patch we found.
  */
index 34178564be2a70adbeaf3fd890cac32d97a51635..c1ea5b99983935ca3742bcdcf49821eaf740f77b 100644 (file)
@@ -1,4 +1,5 @@
 #include <linux/kernel.h>
+#include <linux/sched.h>
 #include <linux/mm.h>
 #include <asm/cpufeature.h>
 #include <asm/msr.h>
@@ -14,6 +15,8 @@ static void early_init_transmeta(struct cpuinfo_x86 *c)
                if (xlvl >= 0x80860001)
                        c->x86_capability[CPUID_8086_0001_EDX] = cpuid_edx(0x80860001);
        }
+
+       clear_sched_clock_stable();
 }
 
 static void init_transmeta(struct cpuinfo_x86 *c)
index 90e8dde3ec26b1d97d10309d2d796489078b5cdf..b2bbad6ebe4d8c5c822965134ea1daf4a6fe8adc 100644 (file)
@@ -580,24 +580,19 @@ static void __init update_e820_saved(void)
 }
 #define MAX_GAP_END 0x100000000ull
 /*
- * Search for a gap in the e820 memory space from start_addr to end_addr.
+ * Search for a gap in the e820 memory space from 0 to MAX_GAP_END.
  */
-__init int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
-               unsigned long start_addr, unsigned long long end_addr)
+static int __init e820_search_gap(unsigned long *gapstart,
+               unsigned long *gapsize)
 {
-       unsigned long long last;
+       unsigned long long last = MAX_GAP_END;
        int i = e820->nr_map;
        int found = 0;
 
-       last = (end_addr && end_addr < MAX_GAP_END) ? end_addr : MAX_GAP_END;
-
        while (--i >= 0) {
                unsigned long long start = e820->map[i].addr;
                unsigned long long end = start + e820->map[i].size;
 
-               if (end < start_addr)
-                       continue;
-
                /*
                 * Since "last" is at most 4GB, we know we'll
                 * fit in 32 bits if this condition is true
@@ -628,18 +623,19 @@ __init void e820_setup_gap(void)
        unsigned long gapstart, gapsize;
        int found;
 
-       gapstart = 0x10000000;
        gapsize = 0x400000;
-       found  = e820_search_gap(&gapstart, &gapsize, 0, MAX_GAP_END);
+       found  = e820_search_gap(&gapstart, &gapsize);
 
-#ifdef CONFIG_X86_64
        if (!found) {
+#ifdef CONFIG_X86_64
                gapstart = (max_pfn << PAGE_SHIFT) + 1024*1024;
                printk(KERN_ERR
        "e820: cannot find a gap in the 32bit address range\n"
        "e820: PCI devices with unassigned 32bit BARs may break!\n");
-       }
+#else
+               gapstart = 0x10000000;
 #endif
+       }
 
        /*
         * e820_reserve_resources_late protect stolen RAM already
index e4e97a5355ce852ac49937fd180f62b614c1286c..e1114f070c2dfdedf9911cd587afb7cca1769785 100644 (file)
@@ -178,13 +178,8 @@ void fpstate_init(union fpregs_state *state)
 
        memset(state, 0, fpu_kernel_xstate_size);
 
-       /*
-        * XRSTORS requires that this bit is set in xcomp_bv, or
-        * it will #GP. Make sure it is replaced after the memset().
-        */
        if (static_cpu_has(X86_FEATURE_XSAVES))
-               state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT;
-
+               fpstate_init_xstate(&state->xsave);
        if (static_cpu_has(X86_FEATURE_FXSR))
                fpstate_init_fxstate(&state->fxsave);
        else
index 60dece392b3a7ab0ef67ce57edc10f79332686eb..19bdd1bf81607cfe06d57bf4d4e0b42b9088d691 100644 (file)
@@ -48,13 +48,7 @@ void fpu__init_cpu(void)
        fpu__init_cpu_xstate();
 }
 
-/*
- * The earliest FPU detection code.
- *
- * Set the X86_FEATURE_FPU CPU-capability bit based on
- * trying to execute an actual sequence of FPU instructions:
- */
-static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
+static bool fpu__probe_without_cpuid(void)
 {
        unsigned long cr0;
        u16 fsw, fcw;
@@ -65,18 +59,25 @@ static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
        cr0 &= ~(X86_CR0_TS | X86_CR0_EM);
        write_cr0(cr0);
 
-       if (!test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) {
-               asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
-                            : "+m" (fsw), "+m" (fcw));
+       asm volatile("fninit ; fnstsw %0 ; fnstcw %1" : "+m" (fsw), "+m" (fcw));
+
+       pr_info("x86/fpu: Probing for FPU: FSW=0x%04hx FCW=0x%04hx\n", fsw, fcw);
 
-               if (fsw == 0 && (fcw & 0x103f) == 0x003f)
-                       set_cpu_cap(c, X86_FEATURE_FPU);
+       return fsw == 0 && (fcw & 0x103f) == 0x003f;
+}
+
+static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
+{
+       if (!boot_cpu_has(X86_FEATURE_CPUID) &&
+           !test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) {
+               if (fpu__probe_without_cpuid())
+                       setup_force_cpu_cap(X86_FEATURE_FPU);
                else
-                       clear_cpu_cap(c, X86_FEATURE_FPU);
+                       setup_clear_cpu_cap(X86_FEATURE_FPU);
        }
 
 #ifndef CONFIG_MATH_EMULATION
-       if (!boot_cpu_has(X86_FEATURE_FPU)) {
+       if (!test_cpu_cap(&boot_cpu_data, X86_FEATURE_FPU)) {
                pr_emerg("x86/fpu: Giving up, no FPU found and no math emulation present\n");
                for (;;)
                        asm volatile("hlt");
index 1d7770447b3ec1cd108a3ca8aec0bad3830fd3db..c24ac1efb12d7a1574450a1359699309148afcd6 100644 (file)
@@ -78,6 +78,7 @@ void fpu__xstate_clear_all_cpu_caps(void)
        setup_clear_cpu_cap(X86_FEATURE_PKU);
        setup_clear_cpu_cap(X86_FEATURE_AVX512_4VNNIW);
        setup_clear_cpu_cap(X86_FEATURE_AVX512_4FMAPS);
+       setup_clear_cpu_cap(X86_FEATURE_AVX512_VPOPCNTDQ);
 }
 
 /*
@@ -705,8 +706,14 @@ void __init fpu__init_system_xstate(void)
        WARN_ON_FPU(!on_boot_cpu);
        on_boot_cpu = 0;
 
+       if (!boot_cpu_has(X86_FEATURE_FPU)) {
+               pr_info("x86/fpu: No FPU detected\n");
+               return;
+       }
+
        if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
-               pr_info("x86/fpu: Legacy x87 FPU detected.\n");
+               pr_info("x86/fpu: x87 FPU will use %s\n",
+                       boot_cpu_has(X86_FEATURE_FXSR) ? "FXSAVE" : "FSAVE");
                return;
        }
 
index f16c55bfc0907bc4a3b1f35174ca5f5cbc53ce99..e5fb436a6548a9111f95ce3afacf787f5bec278c 100644 (file)
@@ -49,3 +49,65 @@ asmlinkage __visible void __init i386_start_kernel(void)
 
        start_kernel();
 }
+
+/*
+ * Initialize page tables.  This creates a PDE and a set of page
+ * tables, which are located immediately beyond __brk_base.  The variable
+ * _brk_end is set up to point to the first "safe" location.
+ * Mappings are created both at virtual address 0 (identity mapping)
+ * and PAGE_OFFSET for up to _end.
+ *
+ * In PAE mode initial_page_table is statically defined to contain
+ * enough entries to cover the VMSPLIT option (that is the top 1, 2 or 3
+ * entries). The identity mapping is handled by pointing two PGD entries
+ * to the first kernel PMD. Note the upper half of each PMD or PTE are
+ * always zero at this stage.
+ */
+void __init mk_early_pgtbl_32(void)
+{
+#ifdef __pa
+#undef __pa
+#endif
+#define __pa(x)  ((unsigned long)(x) - PAGE_OFFSET)
+       pte_t pte, *ptep;
+       int i;
+       unsigned long *ptr;
+       /* Enough space to fit pagetables for the low memory linear map */
+       const unsigned long limit = __pa(_end) +
+               (PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT);
+#ifdef CONFIG_X86_PAE
+       pmd_t pl2, *pl2p = (pmd_t *)__pa(initial_pg_pmd);
+#define SET_PL2(pl2, val)    { (pl2).pmd = (val); }
+#else
+       pgd_t pl2, *pl2p = (pgd_t *)__pa(initial_page_table);
+#define SET_PL2(pl2, val)   { (pl2).pgd = (val); }
+#endif
+
+       ptep = (pte_t *)__pa(__brk_base);
+       pte.pte = PTE_IDENT_ATTR;
+
+       while ((pte.pte & PTE_PFN_MASK) < limit) {
+
+               SET_PL2(pl2, (unsigned long)ptep | PDE_IDENT_ATTR);
+               *pl2p = pl2;
+#ifndef CONFIG_X86_PAE
+               /* Kernel PDE entry */
+               *(pl2p +  ((PAGE_OFFSET >> PGDIR_SHIFT))) = pl2;
+#endif
+               for (i = 0; i < PTRS_PER_PTE; i++) {
+                       *ptep = pte;
+                       pte.pte += PAGE_SIZE;
+                       ptep++;
+               }
+
+               pl2p++;
+       }
+
+       ptr = (unsigned long *)__pa(&max_pfn_mapped);
+       /* Can't use pte_pfn() since it's a call with CONFIG_PARAVIRT */
+       *ptr = (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;
+
+       ptr = (unsigned long *)__pa(&_brk_end);
+       *ptr = (unsigned long)ptep + PAGE_OFFSET;
+}
+
index 4e8577d03372c747c61e254e093a8c2ae7e4298c..1f85ee8f9439fff69f6bc7a38523b38f88311cb0 100644 (file)
@@ -24,6 +24,7 @@
 #include <asm/nops.h>
 #include <asm/bootparam.h>
 #include <asm/export.h>
+#include <asm/pgtable_32.h>
 
 /* Physical address */
 #define pa(X) ((X) - __PAGE_OFFSET)
 #define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability
 #define X86_VENDOR_ID  new_cpu_data+CPUINFO_x86_vendor_id
 
-/*
- * This is how much memory in addition to the memory covered up to
- * and including _end we need mapped initially.
- * We need:
- *     (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non PAE)
- *     (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE)
- *
- * Modulo rounding, each megabyte assigned here requires a kilobyte of
- * memory, which is currently unreclaimed.
- *
- * This should be a multiple of a page.
- *
- * KERNEL_IMAGE_SIZE should be greater than pa(_end)
- * and small than max_low_pfn, otherwise will waste some page table entries
- */
-
-#if PTRS_PER_PMD > 1
-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
-#else
-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
-#endif
 
 #define SIZEOF_PTREGS 17*4
 
-/*
- * Number of possible pages in the lowmem region.
- *
- * We shift 2 by 31 instead of 1 by 32 to the left in order to avoid a
- * gas warning about overflowing shift count when gas has been compiled
- * with only a host target support using a 32-bit type for internal
- * representation.
- */
-LOWMEM_PAGES = (((2<<31) - __PAGE_OFFSET) >> PAGE_SHIFT)
-
-/* Enough space to fit pagetables for the low memory linear map */
-MAPPING_BEYOND_END = PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT
-
 /*
  * Worst-case size of the kernel mapping we need to make:
  * a relocatable kernel can live anywhere in lowmem, so we need to be able
@@ -160,90 +127,15 @@ ENTRY(startup_32)
        call load_ucode_bsp
 #endif
 
-/*
- * Initialize page tables.  This creates a PDE and a set of page
- * tables, which are located immediately beyond __brk_base.  The variable
- * _brk_end is set up to point to the first "safe" location.
- * Mappings are created both at virtual address 0 (identity mapping)
- * and PAGE_OFFSET for up to _end.
- */
-#ifdef CONFIG_X86_PAE
-
-       /*
-        * In PAE mode initial_page_table is statically defined to contain
-        * enough entries to cover the VMSPLIT option (that is the top 1, 2 or 3
-        * entries). The identity mapping is handled by pointing two PGD entries
-        * to the first kernel PMD.
-        *
-        * Note the upper half of each PMD or PTE are always zero at this stage.
-        */
-
-#define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */
-
-       xorl %ebx,%ebx                          /* %ebx is kept at zero */
-
-       movl $pa(__brk_base), %edi
-       movl $pa(initial_pg_pmd), %edx
-       movl $PTE_IDENT_ATTR, %eax
-10:
-       leal PDE_IDENT_ATTR(%edi),%ecx          /* Create PMD entry */
-       movl %ecx,(%edx)                        /* Store PMD entry */
-                                               /* Upper half already zero */
-       addl $8,%edx
-       movl $512,%ecx
-11:
-       stosl
-       xchgl %eax,%ebx
-       stosl
-       xchgl %eax,%ebx
-       addl $0x1000,%eax
-       loop 11b
-
-       /*
-        * End condition: we must map up to the end + MAPPING_BEYOND_END.
-        */
-       movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
-       cmpl %ebp,%eax
-       jb 10b
-1:
-       addl $__PAGE_OFFSET, %edi
-       movl %edi, pa(_brk_end)
-       shrl $12, %eax
-       movl %eax, pa(max_pfn_mapped)
+       /* Create early pagetables. */
+       call  mk_early_pgtbl_32
 
        /* Do early initialization of the fixmap area */
        movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
+#ifdef  CONFIG_X86_PAE
+#define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */
        movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
-#else  /* Not PAE */
-
-page_pde_offset = (__PAGE_OFFSET >> 20);
-
-       movl $pa(__brk_base), %edi
-       movl $pa(initial_page_table), %edx
-       movl $PTE_IDENT_ATTR, %eax
-10:
-       leal PDE_IDENT_ATTR(%edi),%ecx          /* Create PDE entry */
-       movl %ecx,(%edx)                        /* Store identity PDE entry */
-       movl %ecx,page_pde_offset(%edx)         /* Store kernel PDE entry */
-       addl $4,%edx
-       movl $1024, %ecx
-11:
-       stosl
-       addl $0x1000,%eax
-       loop 11b
-       /*
-        * End condition: we must map up to the end + MAPPING_BEYOND_END.
-        */
-       movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
-       cmpl %ebp,%eax
-       jb 10b
-       addl $__PAGE_OFFSET, %edi
-       movl %edi, pa(_brk_end)
-       shrl $12, %eax
-       movl %eax, pa(max_pfn_mapped)
-
-       /* Do early initialization of the fixmap area */
-       movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
+#else
        movl %eax,pa(initial_page_table+0xffc)
 #endif
 
@@ -666,6 +558,7 @@ ENTRY(setup_once_ref)
 __PAGE_ALIGNED_BSS
        .align PAGE_SIZE
 #ifdef CONFIG_X86_PAE
+.globl initial_pg_pmd
 initial_pg_pmd:
        .fill 1024*KPMDS,4,0
 #else
index 85e87b46c318026ed28d87056c516aec3e5fb9ed..dc6ba5bda9fc83630c773a80c4adea6871db0a59 100644 (file)
@@ -352,6 +352,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer)
        } else {
                struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
 
+               irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
                irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
                disable_irq(hdev->irq);
                irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
index cb9c1ed1d3919574ff1e54cc2a0ee66a8d0c36bf..f73f475d057339ce26283c0423d9b77a78442064 100644 (file)
@@ -132,10 +132,8 @@ int sched_set_itmt_support(void)
 
        sysctl_sched_itmt_enabled = 1;
 
-       if (sysctl_sched_itmt_enabled) {
-               x86_topology_update = true;
-               rebuild_sched_domains();
-       }
+       x86_topology_update = true;
+       rebuild_sched_domains();
 
        mutex_unlock(&itmt_update_mutex);
 
index fc25f698d792faed00f461b0a378f30c35e3eb3d..c37bd0f39c708bc0b36da99574b0188597e0c6a4 100644 (file)
@@ -32,8 +32,7 @@ static void bug_at(unsigned char *ip, int line)
         * Something went wrong. Crash the box, as something could be
         * corrupting the kernel.
         */
-       pr_warning("Unexpected op at %pS [%p] (%02x %02x %02x %02x %02x) %s:%d\n",
-              ip, ip, ip[0], ip[1], ip[2], ip[3], ip[4], __FILE__, line);
+       pr_crit("jump_label: Fatal kernel bug, unexpected op at %pS [%p] (%5ph) %d\n", ip, ip, ip, line);
        BUG();
 }
 
index eb3509338ae01ac69097d3decb922075ab63cb6a..520b8dfe164026cf53b762f87b8f507401384969 100644 (file)
@@ -745,7 +745,7 @@ __visible __used void *trampoline_handler(struct pt_regs *regs)
         *       will be the real return address, and all the rest will
         *       point to kretprobe_trampoline.
         */
-       hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+       hlist_for_each_entry(ri, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;
index 36bc664160215bb33fce816205e184b657aa852d..099fcba4981d8821059ed250fda39c7dcd040895 100644 (file)
@@ -620,18 +620,4 @@ void __init kvm_spinlock_init(void)
        }
 }
 
-static __init int kvm_spinlock_init_jump(void)
-{
-       if (!kvm_para_available())
-               return 0;
-       if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
-               return 0;
-
-       static_key_slow_inc(&paravirt_ticketlocks_enabled);
-       printk(KERN_INFO "KVM setup paravirtual spinlock\n");
-
-       return 0;
-}
-early_initcall(kvm_spinlock_init_jump);
-
 #endif /* CONFIG_PARAVIRT_SPINLOCKS */
index 2a5cafdf8808eee394039c30f0700c234e4a615b..542710b99f52c7b0d64368776427393cea66d596 100644 (file)
@@ -107,12 +107,12 @@ static inline void kvm_sched_clock_init(bool stable)
 {
        if (!stable) {
                pv_time_ops.sched_clock = kvm_clock_read;
+               clear_sched_clock_stable();
                return;
        }
 
        kvm_sched_clock_offset = kvm_clock_read();
        pv_time_ops.sched_clock = kvm_sched_clock_read;
-       set_sched_clock_stable();
 
        printk(KERN_INFO "kvm-clock: using sched offset of %llu cycles\n",
                        kvm_sched_clock_offset);
index 6d4bf812af45d9ed144ee33faf9f906ed9e107c6..6259327f34547e4270d1d2aec04505f1197d16f9 100644 (file)
@@ -42,6 +42,3 @@ struct pv_lock_ops pv_lock_ops = {
 #endif /* SMP */
 };
 EXPORT_SYMBOL(pv_lock_ops);
-
-struct static_key paravirt_ticketlocks_enabled = STATIC_KEY_INIT_FALSE;
-EXPORT_SYMBOL(paravirt_ticketlocks_enabled);
index 5d400ba1349df2c87bd40758578bd49122426b61..d47517941bbc03ee288848561c54b7f791a97e76 100644 (file)
@@ -296,7 +296,7 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 
        /* were we called with bad_dma_address? */
        badend = DMA_ERROR_CODE + (EMERGENCY_PAGES * PAGE_SIZE);
-       if (unlikely((dma_addr >= DMA_ERROR_CODE) && (dma_addr < badend))) {
+       if (unlikely(dma_addr < badend)) {
                WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA "
                       "address 0x%Lx\n", dma_addr);
                return;
index 4cfba947d77429fd13d393819b1682879ed9aa06..69780edf0dde90c8c5b04211525dcd1137d73e73 100644 (file)
@@ -1176,6 +1176,20 @@ void __init setup_arch(char **cmdline_p)
        /* Allocate bigger log buffer */
        setup_log_buf(1);
 
+       if (efi_enabled(EFI_BOOT)) {
+               switch (boot_params.secure_boot) {
+               case efi_secureboot_mode_disabled:
+                       pr_info("Secure boot disabled\n");
+                       break;
+               case efi_secureboot_mode_enabled:
+                       pr_info("Secure boot enabled\n");
+                       break;
+               default:
+                       pr_info("Secure boot could not be determined\n");
+                       break;
+               }
+       }
+
        reserve_initrd();
 
        acpi_table_upgrade();
index 46732dc3b73cd67e874dfe99b0eeb807f816e06a..a0d38685f7dfdd397f8e8b792d2fe28c65a893e5 100644 (file)
@@ -433,9 +433,15 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
                int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 
                if (c->phys_proc_id == o->phys_proc_id &&
-                   per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) &&
-                   c->cpu_core_id == o->cpu_core_id)
-                       return topology_sane(c, o, "smt");
+                   per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) {
+                       if (c->cpu_core_id == o->cpu_core_id)
+                               return topology_sane(c, o, "smt");
+
+                       if ((c->cu_id != 0xff) &&
+                           (o->cu_id != 0xff) &&
+                           (c->cu_id == o->cu_id))
+                               return topology_sane(c, o, "smt");
+               }
 
        } else if (c->phys_proc_id == o->phys_proc_id &&
                   c->cpu_core_id == o->cpu_core_id) {
@@ -1341,8 +1347,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
        pr_info("CPU0: ");
        print_cpu_info(&cpu_data(0));
 
-       if (is_uv_system())
-               uv_system_init();
+       uv_system_init();
 
        set_mtrr_aps_delayed_init();
 
diff --git a/arch/x86/kernel/test_nx.c b/arch/x86/kernel/test_nx.c
deleted file mode 100644 (file)
index a3b875c..0000000
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * test_nx.c: functional test for NX functionality
- *
- * (C) Copyright 2008 Intel Corporation
- * Author: Arjan van de Ven <arjan@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-#include <linux/module.h>
-#include <linux/sort.h>
-#include <linux/slab.h>
-
-#include <linux/uaccess.h>
-#include <asm/asm.h>
-
-extern int rodata_test_data;
-
-/*
- * This file checks 4 things:
- * 1) Check if the stack is not executable
- * 2) Check if kmalloc memory is not executable
- * 3) Check if the .rodata section is not executable
- * 4) Check if the .data section of a module is not executable
- *
- * To do this, the test code tries to execute memory in stack/kmalloc/etc,
- * and then checks if the expected trap happens.
- *
- * Sadly, this implies having a dynamic exception handling table entry.
- * ... which can be done (and will make Rusty cry)... but it can only
- * be done in a stand-alone module with only 1 entry total.
- * (otherwise we'd have to sort and that's just too messy)
- */
-
-
-
-/*
- * We want to set up an exception handling point on our stack,
- * which means a variable value. This function is rather dirty
- * and walks the exception table of the module, looking for a magic
- * marker and replaces it with a specific function.
- */
-static void fudze_exception_table(void *marker, void *new)
-{
-       struct module *mod = THIS_MODULE;
-       struct exception_table_entry *extable;
-
-       /*
-        * Note: This module has only 1 exception table entry,
-        * so searching and sorting is not needed. If that changes,
-        * this would be the place to search and re-sort the exception
-        * table.
-        */
-       if (mod->num_exentries > 1) {
-               printk(KERN_ERR "test_nx: too many exception table entries!\n");
-               printk(KERN_ERR "test_nx: test results are not reliable.\n");
-               return;
-       }
-       extable = (struct exception_table_entry *)mod->extable;
-       extable[0].insn = (unsigned long)new;
-}
-
-
-/*
- * exception tables get their symbols translated so we need
- * to use a fake function to put in there, which we can then
- * replace at runtime.
- */
-void foo_label(void);
-
-/*
- * returns 0 for not-executable, negative for executable
- *
- * Note: we cannot allow this function to be inlined, because
- * that would give us more than 1 exception table entry.
- * This in turn would break the assumptions above.
- */
-static noinline int test_address(void *address)
-{
-       unsigned long result;
-
-       /* Set up an exception table entry for our address */
-       fudze_exception_table(&foo_label, address);
-       result = 1;
-       asm volatile(
-               "foo_label:\n"
-               "0:     call *%[fake_code]\n"
-               "1:\n"
-               ".section .fixup,\"ax\"\n"
-               "2:     mov %[zero], %[rslt]\n"
-               "       ret\n"
-               ".previous\n"
-               _ASM_EXTABLE(0b,2b)
-               : [rslt] "=r" (result)
-               : [fake_code] "r" (address), [zero] "r" (0UL), "0" (result)
-       );
-       /* change the exception table back for the next round */
-       fudze_exception_table(address, &foo_label);
-
-       if (result)
-               return -ENODEV;
-       return 0;
-}
-
-static unsigned char test_data = 0xC3; /* 0xC3 is the opcode for "ret" */
-
-static int test_NX(void)
-{
-       int ret = 0;
-       /* 0xC3 is the opcode for "ret" */
-       char stackcode[] = {0xC3, 0x90, 0 };
-       char *heap;
-
-       test_data = 0xC3;
-
-       printk(KERN_INFO "Testing NX protection\n");
-
-       /* Test 1: check if the stack is not executable */
-       if (test_address(&stackcode)) {
-               printk(KERN_ERR "test_nx: stack was executable\n");
-               ret = -ENODEV;
-       }
-
-
-       /* Test 2: Check if the heap is executable */
-       heap = kmalloc(64, GFP_KERNEL);
-       if (!heap)
-               return -ENOMEM;
-       heap[0] = 0xC3; /* opcode for "ret" */
-
-       if (test_address(heap)) {
-               printk(KERN_ERR "test_nx: heap was executable\n");
-               ret = -ENODEV;
-       }
-       kfree(heap);
-
-       /*
-        * The following 2 tests currently fail, this needs to get fixed
-        * Until then, don't run them to avoid too many people getting scared
-        * by the error message
-        */
-
-       /* Test 3: Check if the .rodata section is executable */
-       if (rodata_test_data != 0xC3) {
-               printk(KERN_ERR "test_nx: .rodata marker has invalid value\n");
-               ret = -ENODEV;
-       } else if (test_address(&rodata_test_data)) {
-               printk(KERN_ERR "test_nx: .rodata section is executable\n");
-               ret = -ENODEV;
-       }
-
-#if 0
-       /* Test 4: Check if the .data section of a module is executable */
-       if (test_address(&test_data)) {
-               printk(KERN_ERR "test_nx: .data section is executable\n");
-               ret = -ENODEV;
-       }
-
-#endif
-       return ret;
-}
-
-static void test_exit(void)
-{
-}
-
-module_init(test_NX);
-module_exit(test_exit);
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Testcase for the NX infrastructure");
-MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
index bf0c6d049080beb9e3635b0099da86d7a74fd7bd..1dc86ee60a0319e7d26494744918bb7a1ecff355 100644 (file)
@@ -563,11 +563,9 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
         * as we may switch to the interrupt stack.
         */
        debug_stack_usage_inc();
-       preempt_disable();
        cond_local_irq_enable(regs);
        do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
        cond_local_irq_disable(regs);
-       preempt_enable_no_resched();
        debug_stack_usage_dec();
 exit:
        ist_exit(regs);
@@ -742,14 +740,12 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
        debug_stack_usage_inc();
 
        /* It's safe to allow irq's after DR6 has been saved */
-       preempt_disable();
        cond_local_irq_enable(regs);
 
        if (v8086_mode(regs)) {
                handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
                                        X86_TRAP_DB);
                cond_local_irq_disable(regs);
-               preempt_enable_no_resched();
                debug_stack_usage_dec();
                goto exit;
        }
@@ -769,7 +765,6 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
        if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
                send_sigtrap(tsk, regs, error_code, si_code);
        cond_local_irq_disable(regs);
-       preempt_enable_no_resched();
        debug_stack_usage_dec();
 
 exit:
index e41af597aed8e2e454965f8459f26b372c091b4f..2724dc82f992ef2845fddc55c5965c9a00aa0113 100644 (file)
@@ -1107,6 +1107,16 @@ static u64 read_tsc(struct clocksource *cs)
        return (u64)rdtsc_ordered();
 }
 
+static void tsc_cs_mark_unstable(struct clocksource *cs)
+{
+       if (tsc_unstable)
+               return;
+       tsc_unstable = 1;
+       clear_sched_clock_stable();
+       disable_sched_clock_irqtime();
+       pr_info("Marking TSC unstable due to clocksource watchdog\n");
+}
+
 /*
  * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
  */
@@ -1119,6 +1129,7 @@ static struct clocksource clocksource_tsc = {
                                  CLOCK_SOURCE_MUST_VERIFY,
        .archdata               = { .vclock_mode = VCLOCK_TSC },
        .resume                 = tsc_resume,
+       .mark_unstable          = tsc_cs_mark_unstable,
 };
 
 void mark_tsc_unstable(char *reason)
@@ -1356,6 +1367,9 @@ void __init tsc_init(void)
                (unsigned long)cpu_khz / 1000,
                (unsigned long)cpu_khz % 1000);
 
+       /* Sanitize TSC ADJUST before cyc2ns gets initialized */
+       tsc_store_and_check_tsc_adjust(true);
+
        /*
         * Secondary CPUs do not run through tsc_init(), so set up
         * all the scale factors for all CPUs, assuming the same
@@ -1386,8 +1400,6 @@ void __init tsc_init(void)
 
        if (unsynchronized_tsc())
                mark_tsc_unstable("TSCs unsynchronized");
-       else
-               tsc_store_and_check_tsc_adjust(true);
 
        check_system_tsc_reliable();
 
index d0db011051a54212742680d3f25c2c52fb5c8ea6..728f7537847583108075d91ad4aa75edfd86f077 100644 (file)
@@ -286,13 +286,6 @@ void check_tsc_sync_source(int cpu)
        if (unsynchronized_tsc())
                return;
 
-       if (tsc_clocksource_reliable) {
-               if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING)
-                       pr_info(
-                       "Skipped synchronization checks as TSC is reliable.\n");
-               return;
-       }
-
        /*
         * Set the maximum number of test runs to
         *  1 if the CPU does not provide the TSC_ADJUST MSR
@@ -380,14 +373,19 @@ void check_tsc_sync_target(void)
        int cpus = 2;
 
        /* Also aborts if there is no TSC. */
-       if (unsynchronized_tsc() || tsc_clocksource_reliable)
+       if (unsynchronized_tsc())
                return;
 
        /*
         * Store, verify and sanitize the TSC adjust register. If
         * successful skip the test.
+        *
+        * The test is also skipped when the TSC is marked reliable. This
+        * is true for SoCs which have no fallback clocksource. On these
+        * SoCs the TSC is frequency synchronized, but still the TSC ADJUST
+        * register might have been wreckaged by the BIOS..
         */
-       if (tsc_store_and_check_tsc_adjust(false)) {
+       if (tsc_store_and_check_tsc_adjust(false) || tsc_clocksource_reliable) {
                atomic_inc(&skip_test);
                return;
        }
index ec5d7545e6dcbe2d1724e48b5e595b01071a3adc..0442d98367aec50f3810a332c81be5eac0a29a95 100644 (file)
@@ -160,11 +160,12 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval)
 
 static void mark_screen_rdonly(struct mm_struct *mm)
 {
+       struct vm_area_struct *vma;
+       spinlock_t *ptl;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
-       spinlock_t *ptl;
        int i;
 
        down_write(&mm->mmap_sem);
@@ -177,7 +178,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
        pmd = pmd_offset(pud, 0xA0000);
 
        if (pmd_trans_huge(*pmd)) {
-               struct vm_area_struct *vma = find_vma(mm, 0xA0000);
+               vma = find_vma(mm, 0xA0000);
                split_huge_pmd(vma, pmd, 0xA0000);
        }
        if (pmd_none_or_clear_bad(pmd))
index 1572c35b4f1a637b2ebb622ae88c5e7e14eafd63..2ecd7dab4631a2ef5e729bb03fb4568ed8ce0c3e 100644 (file)
@@ -964,10 +964,11 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
 /* Calculate cpu time spent by current task in 100ns units */
 static u64 current_task_runtime_100ns(void)
 {
-       cputime_t utime, stime;
+       u64 utime, stime;
 
        task_cputime_adjusted(current, &utime, &stime);
-       return div_u64(cputime_to_nsecs(utime + stime), 100);
+
+       return div_u64(utime + stime, 100);
 }
 
 static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
index d153be8929a68440ae5e5894497cbb7fa1ab9913..e52c9088660fac47d6da377b39412378ff0157b0 100644 (file)
@@ -3182,6 +3182,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
        memcpy(dest, xsave, XSAVE_HDR_OFFSET);
 
        /* Set XSTATE_BV */
+       xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
        *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
 
        /*
index 073d1f1a620bd6797973eaa05338d918c9dd7b54..a8e91ae89fb3048c78aa4a279565070bb1958902 100644 (file)
@@ -156,13 +156,13 @@ EXPORT_SYMBOL(__delay);
 
 inline void __const_udelay(unsigned long xloops)
 {
+       unsigned long lpj = this_cpu_read(cpu_info.loops_per_jiffy) ? : loops_per_jiffy;
        int d0;
 
        xloops *= 4;
        asm("mull %%edx"
                :"=d" (xloops), "=&a" (d0)
-               :"1" (xloops), "0"
-               (this_cpu_read(cpu_info.loops_per_jiffy) * (HZ/4)));
+               :"1" (xloops), "0" (lpj * (HZ / 4)));
 
        __delay(++xloops);
 }
index ea9c49adaa1fce1f6cbf3962800e0b24bdf72c91..58b5bee7ea27011419a0c81f5efeac3964a3ad71 100644 (file)
 #include <linux/debugfs.h>
 #include <linux/mm.h>
 #include <linux/init.h>
+#include <linux/sched.h>
 #include <linux/seq_file.h>
 
+#include <asm/kasan.h>
 #include <asm/pgtable.h>
 
 /*
@@ -50,6 +52,10 @@ enum address_markers_idx {
        LOW_KERNEL_NR,
        VMALLOC_START_NR,
        VMEMMAP_START_NR,
+#ifdef CONFIG_KASAN
+       KASAN_SHADOW_START_NR,
+       KASAN_SHADOW_END_NR,
+#endif
 # ifdef CONFIG_X86_ESPFIX64
        ESPFIX_START_NR,
 # endif
@@ -75,6 +81,10 @@ static struct addr_marker address_markers[] = {
        { 0/* PAGE_OFFSET */,   "Low Kernel Mapping" },
        { 0/* VMALLOC_START */, "vmalloc() Area" },
        { 0/* VMEMMAP_START */, "Vmemmap" },
+#ifdef CONFIG_KASAN
+       { KASAN_SHADOW_START,   "KASAN shadow" },
+       { KASAN_SHADOW_END,     "KASAN shadow end" },
+#endif
 # ifdef CONFIG_X86_ESPFIX64
        { ESPFIX_BASE_ADDR,     "ESPfix Area", 16 },
 # endif
@@ -326,18 +336,31 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr,
 
 #if PTRS_PER_PUD > 1
 
+/*
+ * This is an optimization for CONFIG_DEBUG_WX=y + CONFIG_KASAN=y
+ * KASAN fills page tables with the same values. Since there is no
+ * point in checking page table more than once we just skip repeated
+ * entries. This saves us dozens of seconds during boot.
+ */
+static bool pud_already_checked(pud_t *prev_pud, pud_t *pud, bool checkwx)
+{
+       return checkwx && prev_pud && (pud_val(*prev_pud) == pud_val(*pud));
+}
+
 static void walk_pud_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
                                                        unsigned long P)
 {
        int i;
        pud_t *start;
        pgprotval_t prot;
+       pud_t *prev_pud = NULL;
 
        start = (pud_t *) pgd_page_vaddr(addr);
 
        for (i = 0; i < PTRS_PER_PUD; i++) {
                st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
-               if (!pud_none(*start)) {
+               if (!pud_none(*start) &&
+                   !pud_already_checked(prev_pud, start, st->check_wx)) {
                        if (pud_large(*start) || !pud_present(*start)) {
                                prot = pud_flags(*start);
                                note_page(m, st, __pgprot(prot), 2);
@@ -348,6 +371,7 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
                } else
                        note_page(m, st, __pgprot(0), 2);
 
+               prev_pud = start;
                start++;
        }
 }
@@ -406,6 +430,7 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
                } else
                        note_page(m, &st, __pgprot(0), 1);
 
+               cond_resched();
                start++;
        }
 
index 5a287e523eab0b42d6e9b1e73574dc7ead77502b..28d42130243c0fdd463e7c2221af83115cce6bec 100644 (file)
@@ -214,7 +214,20 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache,
                            int in_flags, struct page **pages)
 {
        unsigned int i, level;
+#ifdef CONFIG_PREEMPT
+       /*
+        * Avoid wbinvd() because it causes latencies on all CPUs,
+        * regardless of any CPU isolation that may be in effect.
+        *
+        * This should be extended for CAT enabled systems independent of
+        * PREEMPT because wbinvd() does not respect the CAT partitions and
+        * this is exposed to unpriviledged users through the graphics
+        * subsystem.
+        */
+       unsigned long do_wbinvd = 0;
+#else
        unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */
+#endif
 
        BUG_ON(irqs_disabled());
 
index 159b52ccd600eff0ce0ceee71deb1139bfb3ffc1..d76485b228243c2215b287e3d7dc1012f685f425 100644 (file)
@@ -47,7 +47,7 @@ static u64 get_subtree_max_end(struct rb_node *node)
 {
        u64 ret = 0;
        if (node) {
-               struct memtype *data = container_of(node, struct memtype, rb);
+               struct memtype *data = rb_entry(node, struct memtype, rb);
                ret = data->subtree_max_end;
        }
        return ret;
@@ -79,7 +79,7 @@ static struct memtype *memtype_rb_lowest_match(struct rb_root *root,
        struct memtype *last_lower = NULL;
 
        while (node) {
-               struct memtype *data = container_of(node, struct memtype, rb);
+               struct memtype *data = rb_entry(node, struct memtype, rb);
 
                if (get_subtree_max_end(node->rb_left) > start) {
                        /* Lowest overlap if any must be on left side */
@@ -121,7 +121,7 @@ static struct memtype *memtype_rb_match(struct rb_root *root,
 
                node = rb_next(&match->rb);
                if (node)
-                       match = container_of(node, struct memtype, rb);
+                       match = rb_entry(node, struct memtype, rb);
                else
                        match = NULL;
        }
@@ -150,7 +150,7 @@ static int memtype_rb_check_conflict(struct rb_root *root,
 
        node = rb_next(&match->rb);
        while (node) {
-               match = container_of(node, struct memtype, rb);
+               match = rb_entry(node, struct memtype, rb);
 
                if (match->start >= end) /* Checked all possible matches */
                        goto success;
@@ -181,7 +181,7 @@ static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata)
        struct rb_node *parent = NULL;
 
        while (*node) {
-               struct memtype *data = container_of(*node, struct memtype, rb);
+               struct memtype *data = rb_entry(*node, struct memtype, rb);
 
                parent = *node;
                if (data->subtree_max_end < newdata->end)
@@ -270,7 +270,7 @@ int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos)
        }
 
        if (node) { /* pos == i */
-               struct memtype *this = container_of(node, struct memtype, rb);
+               struct memtype *this = rb_entry(node, struct memtype, rb);
                *out = *this;
                return 0;
        } else {
index 6aad870e89620974a685c40300f77cb7694a6f93..04ca8764f0c096f4e3f006ab74e4dc55996735a1 100644 (file)
@@ -19,8 +19,7 @@
 #include <linux/efi.h>
 #include <linux/efi-bgrt.h>
 
-struct acpi_table_bgrt *bgrt_tab;
-void *__initdata bgrt_image;
+struct acpi_table_bgrt bgrt_tab;
 size_t __initdata bgrt_image_size;
 
 struct bmp_header {
@@ -28,66 +27,58 @@ struct bmp_header {
        u32 size;
 } __packed;
 
-void __init efi_bgrt_init(void)
+void __init efi_bgrt_init(struct acpi_table_header *table)
 {
-       acpi_status status;
        void *image;
        struct bmp_header bmp_header;
+       struct acpi_table_bgrt *bgrt = &bgrt_tab;
 
        if (acpi_disabled)
                return;
 
-       status = acpi_get_table("BGRT", 0,
-                               (struct acpi_table_header **)&bgrt_tab);
-       if (ACPI_FAILURE(status))
-               return;
-
-       if (bgrt_tab->header.length < sizeof(*bgrt_tab)) {
+       if (table->length < sizeof(bgrt_tab)) {
                pr_notice("Ignoring BGRT: invalid length %u (expected %zu)\n",
-                      bgrt_tab->header.length, sizeof(*bgrt_tab));
+                      table->length, sizeof(bgrt_tab));
                return;
        }
-       if (bgrt_tab->version != 1) {
+       *bgrt = *(struct acpi_table_bgrt *)table;
+       if (bgrt->version != 1) {
                pr_notice("Ignoring BGRT: invalid version %u (expected 1)\n",
-                      bgrt_tab->version);
-               return;
+                      bgrt->version);
+               goto out;
        }
-       if (bgrt_tab->status & 0xfe) {
+       if (bgrt->status & 0xfe) {
                pr_notice("Ignoring BGRT: reserved status bits are non-zero %u\n",
-                      bgrt_tab->status);
-               return;
+                      bgrt->status);
+               goto out;
        }
-       if (bgrt_tab->image_type != 0) {
+       if (bgrt->image_type != 0) {
                pr_notice("Ignoring BGRT: invalid image type %u (expected 0)\n",
-                      bgrt_tab->image_type);
-               return;
+                      bgrt->image_type);
+               goto out;
        }
-       if (!bgrt_tab->image_address) {
+       if (!bgrt->image_address) {
                pr_notice("Ignoring BGRT: null image address\n");
-               return;
+               goto out;
        }
 
-       image = memremap(bgrt_tab->image_address, sizeof(bmp_header), MEMREMAP_WB);
+       image = early_memremap(bgrt->image_address, sizeof(bmp_header));
        if (!image) {
                pr_notice("Ignoring BGRT: failed to map image header memory\n");
-               return;
+               goto out;
        }
 
        memcpy(&bmp_header, image, sizeof(bmp_header));
-       memunmap(image);
+       early_memunmap(image, sizeof(bmp_header));
        if (bmp_header.id != 0x4d42) {
                pr_notice("Ignoring BGRT: Incorrect BMP magic number 0x%x (expected 0x4d42)\n",
                        bmp_header.id);
-               return;
+               goto out;
        }
        bgrt_image_size = bmp_header.size;
+       efi_mem_reserve(bgrt->image_address, bgrt_image_size);
 
-       bgrt_image = memremap(bgrt_tab->image_address, bmp_header.size, MEMREMAP_WB);
-       if (!bgrt_image) {
-               pr_notice("Ignoring BGRT: failed to map image memory\n");
-               bgrt_image = NULL;
-               return;
-       }
-
-       efi_mem_reserve(bgrt_tab->image_address, bgrt_image_size);
+       return;
+out:
+       memset(bgrt, 0, sizeof(bgrt_tab));
 }
index 274dfc48184977db435a9c5c78607cd8a182067c..565dff3c9a12cf1f042fae494d1bdb3a55d032b3 100644 (file)
@@ -542,11 +542,6 @@ void __init efi_init(void)
                efi_print_memmap();
 }
 
-void __init efi_late_init(void)
-{
-       efi_bgrt_init();
-}
-
 void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
 {
        u64 addr, npages;
@@ -960,6 +955,11 @@ static void __init __efi_enter_virtual_mode(void)
                return;
        }
 
+       if (efi_enabled(EFI_DBG)) {
+               pr_info("EFI runtime memory map:\n");
+               efi_print_memmap();
+       }
+
        BUG_ON(!efi.systab);
 
        if (efi_setup_page_tables(pa, 1 << pg_shift)) {
index 319148bd4b05091d24576a7535b10aad7bec0c2d..a4695da42d77b39cec3b65083c7b7115b1faa32d 100644 (file)
@@ -268,6 +268,22 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 
        efi_scratch.use_pgd = true;
 
+       /*
+        * Certain firmware versions are way too sentimential and still believe
+        * they are exclusive and unquestionable owners of the first physical page,
+        * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
+        * (but then write-access it later during SetVirtualAddressMap()).
+        *
+        * Create a 1:1 mapping for this page, to avoid triple faults during early
+        * boot with such firmware. We are free to hand this page to the BIOS,
+        * as trim_bios_range() will reserve the first page and isolate it away
+        * from memory allocators anyway.
+        */
+       if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, _PAGE_RW)) {
+               pr_err("Failed to create 1:1 mapping for the first page!\n");
+               return 1;
+       }
+
        /*
         * When making calls to the firmware everything needs to be 1:1
         * mapped and addressable with 32-bit pointers. Map the kernel
@@ -398,10 +414,44 @@ void __init parse_efi_setup(u64 phys_addr, u32 data_len)
        efi_setup = phys_addr + sizeof(struct setup_data);
 }
 
-void __init efi_runtime_update_mappings(void)
+static int __init efi_update_mappings(efi_memory_desc_t *md, unsigned long pf)
 {
        unsigned long pfn;
        pgd_t *pgd = efi_pgd;
+       int err1, err2;
+
+       /* Update the 1:1 mapping */
+       pfn = md->phys_addr >> PAGE_SHIFT;
+       err1 = kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, md->num_pages, pf);
+       if (err1) {
+               pr_err("Error while updating 1:1 mapping PA 0x%llx -> VA 0x%llx!\n",
+                          md->phys_addr, md->virt_addr);
+       }
+
+       err2 = kernel_map_pages_in_pgd(pgd, pfn, md->virt_addr, md->num_pages, pf);
+       if (err2) {
+               pr_err("Error while updating VA mapping PA 0x%llx -> VA 0x%llx!\n",
+                          md->phys_addr, md->virt_addr);
+       }
+
+       return err1 || err2;
+}
+
+static int __init efi_update_mem_attr(struct mm_struct *mm, efi_memory_desc_t *md)
+{
+       unsigned long pf = 0;
+
+       if (md->attribute & EFI_MEMORY_XP)
+               pf |= _PAGE_NX;
+
+       if (!(md->attribute & EFI_MEMORY_RO))
+               pf |= _PAGE_RW;
+
+       return efi_update_mappings(md, pf);
+}
+
+void __init efi_runtime_update_mappings(void)
+{
        efi_memory_desc_t *md;
 
        if (efi_enabled(EFI_OLD_MEMMAP)) {
@@ -410,6 +460,24 @@ void __init efi_runtime_update_mappings(void)
                return;
        }
 
+       /*
+        * Use the EFI Memory Attribute Table for mapping permissions if it
+        * exists, since it is intended to supersede EFI_PROPERTIES_TABLE.
+        */
+       if (efi_enabled(EFI_MEM_ATTR)) {
+               efi_memattr_apply_permissions(NULL, efi_update_mem_attr);
+               return;
+       }
+
+       /*
+        * EFI_MEMORY_ATTRIBUTES_TABLE is intended to replace
+        * EFI_PROPERTIES_TABLE. So, use EFI_PROPERTIES_TABLE to update
+        * permissions only if EFI_MEMORY_ATTRIBUTES_TABLE is not
+        * published by the firmware. Even if we find a buggy implementation of
+        * EFI_MEMORY_ATTRIBUTES_TABLE, don't fall back to
+        * EFI_PROPERTIES_TABLE, because of the same reason.
+        */
+
        if (!efi_enabled(EFI_NX_PE_DATA))
                return;
 
@@ -430,15 +498,7 @@ void __init efi_runtime_update_mappings(void)
                        (md->type != EFI_RUNTIME_SERVICES_CODE))
                        pf |= _PAGE_RW;
 
-               /* Update the 1:1 mapping */
-               pfn = md->phys_addr >> PAGE_SHIFT;
-               if (kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, md->num_pages, pf))
-                       pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
-                                  md->phys_addr, md->virt_addr);
-
-               if (kernel_map_pages_in_pgd(pgd, pfn, md->virt_addr, md->num_pages, pf))
-                       pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
-                                  md->phys_addr, md->virt_addr);
+               efi_update_mappings(md, pf);
        }
 }
 
index 90e4f2a6625b6ae2a87cf7c5ec9792a5459cd0e1..a7dbec4dce2758261c6e1680b7ed825e5e44a9d1 100644 (file)
@@ -5,14 +5,12 @@ obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += platform_mrfld_sd.o
 # WiFi
 obj-$(subst m,y,$(CONFIG_BRCMFMAC_SDIO)) += platform_bcm43xx.o
 # IPC Devices
-obj-y += platform_ipc.o
 obj-$(subst m,y,$(CONFIG_MFD_INTEL_MSIC)) += platform_msic.o
 obj-$(subst m,y,$(CONFIG_SND_MFLD_MACHINE)) += platform_msic_audio.o
 obj-$(subst m,y,$(CONFIG_GPIO_MSIC)) += platform_msic_gpio.o
 obj-$(subst m,y,$(CONFIG_MFD_INTEL_MSIC)) += platform_msic_ocd.o
 obj-$(subst m,y,$(CONFIG_MFD_INTEL_MSIC)) += platform_msic_battery.o
 obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_msic_power_btn.o
-obj-$(subst m,y,$(CONFIG_GPIO_INTEL_PMIC)) += platform_pmic_gpio.o
 obj-$(subst m,y,$(CONFIG_INTEL_MFLD_THERMAL)) += platform_msic_thermal.o
 # SPI Devices
 obj-$(subst m,y,$(CONFIG_SPI_SPIDEV)) += platform_mrfld_spidev.o
@@ -28,4 +26,5 @@ obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o
 obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
 # MISC Devices
 obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o
+obj-$(subst m,y,$(CONFIG_RTC_DRV_CMOS)) += platform_mrfld_rtc.o
 obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o
index 52534ec29765358d0a40b88073ea3de2f7fa012b..74283875c7e83925b8bd7c9f5951cd4a0b4430d0 100644 (file)
@@ -32,6 +32,9 @@ static struct gpio_keys_button gpio_button[] = {
        {SW_LID,                -1, 1, "lid_switch",    EV_SW,  0, 20},
        {KEY_VOLUMEUP,          -1, 1, "vol_up",        EV_KEY, 0, 20},
        {KEY_VOLUMEDOWN,        -1, 1, "vol_down",      EV_KEY, 0, 20},
+       {KEY_MUTE,              -1, 1, "mute_enable",   EV_KEY, 0, 20},
+       {KEY_VOLUMEUP,          -1, 1, "volume_up",     EV_KEY, 0, 20},
+       {KEY_VOLUMEDOWN,        -1, 1, "volume_down",   EV_KEY, 0, 20},
        {KEY_CAMERA,            -1, 1, "camera_full",   EV_KEY, 0, 20},
        {KEY_CAMERA_FOCUS,      -1, 1, "camera_half",   EV_KEY, 0, 20},
        {SW_KEYPAD_SLIDE,       -1, 1, "MagSw1",        EV_SW,  0, 20},
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_ipc.c b/arch/x86/platform/intel-mid/device_libs/platform_ipc.c
deleted file mode 100644 (file)
index a84b73d..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * platform_ipc.c: IPC platform library file
- *
- * (C) Copyright 2013 Intel Corporation
- * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/sfi.h>
-#include <linux/gpio.h>
-#include <asm/intel-mid.h>
-#include "platform_ipc.h"
-
-void __init ipc_device_handler(struct sfi_device_table_entry *pentry,
-                               struct devs_id *dev)
-{
-       struct platform_device *pdev;
-       void *pdata = NULL;
-       static struct resource res __initdata = {
-               .name = "IRQ",
-               .flags = IORESOURCE_IRQ,
-       };
-
-       pr_debug("IPC bus, name = %16.16s, irq = 0x%2x\n",
-               pentry->name, pentry->irq);
-
-       /*
-        * We need to call platform init of IPC devices to fill misc_pdata
-        * structure. It will be used in msic_init for initialization.
-        */
-       if (dev != NULL)
-               pdata = dev->get_platform_data(pentry);
-
-       /*
-        * On Medfield the platform device creation is handled by the MSIC
-        * MFD driver so we don't need to do it here.
-        */
-       if (intel_mid_has_msic())
-               return;
-
-       pdev = platform_device_alloc(pentry->name, 0);
-       if (pdev == NULL) {
-               pr_err("out of memory for SFI platform device '%s'.\n",
-                       pentry->name);
-               return;
-       }
-       res.start = pentry->irq;
-       platform_device_add_resources(pdev, &res, 1);
-
-       pdev->dev.platform_data = pdata;
-       intel_scu_device_register(pdev);
-}
-
-static const struct devs_id pmic_audio_dev_id __initconst = {
-       .name = "pmic_audio",
-       .type = SFI_DEV_TYPE_IPC,
-       .delay = 1,
-       .device_handler = &ipc_device_handler,
-};
-
-sfi_device(pmic_audio_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_ipc.h b/arch/x86/platform/intel-mid/device_libs/platform_ipc.h
deleted file mode 100644 (file)
index 79bb09d..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * platform_ipc.h: IPC platform library header file
- *
- * (C) Copyright 2013 Intel Corporation
- * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-#ifndef _PLATFORM_IPC_H_
-#define _PLATFORM_IPC_H_
-
-void __init
-ipc_device_handler(struct sfi_device_table_entry *pentry, struct devs_id *dev);
-
-#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_rtc.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_rtc.c
new file mode 100644 (file)
index 0000000..3135416
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Intel Merrifield legacy RTC initialization file
+ *
+ * (C) Copyright 2017 Intel Corporation
+ *
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+
+#include <asm/hw_irq.h>
+#include <asm/intel-mid.h>
+#include <asm/io_apic.h>
+#include <asm/time.h>
+#include <asm/x86_init.h>
+
+static int __init mrfld_legacy_rtc_alloc_irq(void)
+{
+       struct irq_alloc_info info;
+       int ret;
+
+       if (!x86_platform.legacy.rtc)
+               return -ENODEV;
+
+       ioapic_set_alloc_attr(&info, NUMA_NO_NODE, 1, 0);
+       ret = mp_map_gsi_to_irq(RTC_IRQ, IOAPIC_MAP_ALLOC, &info);
+       if (ret < 0) {
+               pr_info("Failed to allocate RTC interrupt. Disabling RTC\n");
+               x86_platform.legacy.rtc = 0;
+               return ret;
+       }
+
+       return 0;
+}
+
+static int __init mrfld_legacy_rtc_init(void)
+{
+       if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
+               return -ENODEV;
+
+       return mrfld_legacy_rtc_alloc_irq();
+}
+arch_initcall(mrfld_legacy_rtc_init);
index 3f1f1c77d0903a9a4e435979586d026b30c66741..86edd1e941eb07bc46187024ae332409c6924073 100644 (file)
@@ -28,9 +28,9 @@ static struct platform_device wdt_dev = {
 
 static int tangier_probe(struct platform_device *pdev)
 {
-       int gsi;
        struct irq_alloc_info info;
        struct intel_mid_wdt_pdata *pdata = pdev->dev.platform_data;
+       int gsi, irq;
 
        if (!pdata)
                return -EINVAL;
@@ -38,10 +38,10 @@ static int tangier_probe(struct platform_device *pdev)
        /* IOAPIC builds identity mapping between GSI and IRQ on MID */
        gsi = pdata->irq;
        ioapic_set_alloc_attr(&info, cpu_to_node(0), 1, 0);
-       if (mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info) <= 0) {
-               dev_warn(&pdev->dev, "cannot find interrupt %d in ioapic\n",
-                        gsi);
-               return -EINVAL;
+       irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info);
+       if (irq < 0) {
+               dev_warn(&pdev->dev, "cannot find interrupt %d in ioapic\n", gsi);
+               return irq;
        }
 
        return 0;
@@ -82,4 +82,4 @@ static int __init register_mid_wdt(void)
 
        return 0;
 }
-rootfs_initcall(register_mid_wdt);
+arch_initcall(register_mid_wdt);
index cb3490ecb341227062125e12f28b8bcdc47f07e2..d4dc744dd5a54876843bb6326128a253eebc0d87 100644 (file)
@@ -20,7 +20,6 @@
 #include <asm/intel-mid.h>
 
 #include "platform_msic.h"
-#include "platform_ipc.h"
 
 static void *msic_audio_platform_data(void *info)
 {
@@ -40,8 +39,8 @@ static const struct devs_id msic_audio_dev_id __initconst = {
        .name = "msic_audio",
        .type = SFI_DEV_TYPE_IPC,
        .delay = 1,
+       .msic = 1,
        .get_platform_data = &msic_audio_platform_data,
-       .device_handler = &ipc_device_handler,
 };
 
 sfi_device(msic_audio_dev_id);
index 4f72193939a68c668cba48eed07a2f6d236fd4b7..5c3e9919633fa0885224f35be92f1f6316e0fe88 100644 (file)
@@ -19,7 +19,6 @@
 #include <asm/intel-mid.h>
 
 #include "platform_msic.h"
-#include "platform_ipc.h"
 
 static void __init *msic_battery_platform_data(void *info)
 {
@@ -30,8 +29,8 @@ static const struct devs_id msic_battery_dev_id __initconst = {
        .name = "msic_battery",
        .type = SFI_DEV_TYPE_IPC,
        .delay = 1,
+       .msic = 1,
        .get_platform_data = &msic_battery_platform_data,
-       .device_handler = &ipc_device_handler,
 };
 
 sfi_device(msic_battery_dev_id);
index 70de5b531ba0b7959eb8b4b26afc436b5aae300a..9fdb88d460d79da8ce15ab4953da8588e82e9f40 100644 (file)
@@ -20,7 +20,6 @@
 #include <asm/intel-mid.h>
 
 #include "platform_msic.h"
-#include "platform_ipc.h"
 
 static void __init *msic_gpio_platform_data(void *info)
 {
@@ -41,8 +40,8 @@ static const struct devs_id msic_gpio_dev_id __initconst = {
        .name = "msic_gpio",
        .type = SFI_DEV_TYPE_IPC,
        .delay = 1,
+       .msic = 1,
        .get_platform_data = &msic_gpio_platform_data,
-       .device_handler = &ipc_device_handler,
 };
 
 sfi_device(msic_gpio_dev_id);
index 3d7c2011b6cfe41b431ae108041e5615cf157d98..7ae37cdbf256733c02c900e73e0ff4a15eddf471 100644 (file)
@@ -20,7 +20,6 @@
 #include <asm/intel-mid.h>
 
 #include "platform_msic.h"
-#include "platform_ipc.h"
 
 static void __init *msic_ocd_platform_data(void *info)
 {
@@ -42,8 +41,8 @@ static const struct devs_id msic_ocd_dev_id __initconst = {
        .name = "msic_ocd",
        .type = SFI_DEV_TYPE_IPC,
        .delay = 1,
+       .msic = 1,
        .get_platform_data = &msic_ocd_platform_data,
-       .device_handler = &ipc_device_handler,
 };
 
 sfi_device(msic_ocd_dev_id);
index 038f618fbc525592dc269e08658d3c47b6868771..96809b98cf693df3910e3cce00181ad6cb2f7303 100644 (file)
@@ -18,7 +18,6 @@
 #include <asm/intel-mid.h>
 
 #include "platform_msic.h"
-#include "platform_ipc.h"
 
 static void __init *msic_power_btn_platform_data(void *info)
 {
@@ -29,8 +28,8 @@ static const struct devs_id msic_power_btn_dev_id __initconst = {
        .name = "msic_power_btn",
        .type = SFI_DEV_TYPE_IPC,
        .delay = 1,
+       .msic = 1,
        .get_platform_data = &msic_power_btn_platform_data,
-       .device_handler = &ipc_device_handler,
 };
 
 sfi_device(msic_power_btn_dev_id);
index 114a5755b1e49a181f0577c93164518106aa4dfd..3e4167d246cdd98cb2978d0c799004816c026736 100644 (file)
@@ -19,7 +19,6 @@
 #include <asm/intel-mid.h>
 
 #include "platform_msic.h"
-#include "platform_ipc.h"
 
 static void __init *msic_thermal_platform_data(void *info)
 {
@@ -30,8 +29,8 @@ static const struct devs_id msic_thermal_dev_id __initconst = {
        .name = "msic_thermal",
        .type = SFI_DEV_TYPE_IPC,
        .delay = 1,
+       .msic = 1,
        .get_platform_data = &msic_thermal_platform_data,
-       .device_handler = &ipc_device_handler,
 };
 
 sfi_device(msic_thermal_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c b/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c
deleted file mode 100644 (file)
index e30cb62..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * platform_pmic_gpio.c: PMIC GPIO platform data initialization file
- *
- * (C) Copyright 2013 Intel Corporation
- * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/scatterlist.h>
-#include <linux/gpio.h>
-#include <linux/init.h>
-#include <linux/sfi.h>
-#include <linux/intel_pmic_gpio.h>
-#include <asm/intel-mid.h>
-
-#include "platform_ipc.h"
-
-static void __init *pmic_gpio_platform_data(void *info)
-{
-       static struct intel_pmic_gpio_platform_data pmic_gpio_pdata;
-       int gpio_base = get_gpio_by_name("pmic_gpio_base");
-
-       if (gpio_base < 0)
-               gpio_base = 64;
-       pmic_gpio_pdata.gpio_base = gpio_base;
-       pmic_gpio_pdata.irq_base = gpio_base + INTEL_MID_IRQ_OFFSET;
-       pmic_gpio_pdata.gpiointr = 0xffffeff8;
-
-       return &pmic_gpio_pdata;
-}
-
-static const struct devs_id pmic_gpio_spi_dev_id __initconst = {
-       .name = "pmic_gpio",
-       .type = SFI_DEV_TYPE_SPI,
-       .delay = 1,
-       .get_platform_data = &pmic_gpio_platform_data,
-};
-
-static const struct devs_id pmic_gpio_ipc_dev_id __initconst = {
-       .name = "pmic_gpio",
-       .type = SFI_DEV_TYPE_IPC,
-       .delay = 1,
-       .get_platform_data = &pmic_gpio_platform_data,
-       .device_handler = &ipc_device_handler
-};
-
-sfi_device(pmic_gpio_spi_dev_id);
-sfi_device(pmic_gpio_ipc_dev_id);
index e0607c77a1bd67a06fe49f58c50c1bfe9af9da61..ae7bdeb0e507d8890587814befa83b2048744fc8 100644 (file)
@@ -91,6 +91,7 @@ static unsigned long __init tangier_calibrate_tsc(void)
 static void __init tangier_arch_setup(void)
 {
        x86_platform.calibrate_tsc = tangier_calibrate_tsc;
+       x86_platform.legacy.rtc = 1;
 }
 
 /* tangier arch ops */
index 051d264fce2e607c9841303f6bed94b27b920b85..19b43e3a9f0fe138709ebfcd54a0eda8ce544e4c 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/interrupt.h>
 #include <linux/scatterlist.h>
 #include <linux/sfi.h>
-#include <linux/intel_pmic_gpio.h>
 #include <linux/spi/spi.h>
 #include <linux/i2c.h>
 #include <linux/skbuff.h>
@@ -226,7 +225,7 @@ int get_gpio_by_name(const char *name)
        return -EINVAL;
 }
 
-void __init intel_scu_device_register(struct platform_device *pdev)
+static void __init intel_scu_ipc_device_register(struct platform_device *pdev)
 {
        if (ipc_next_dev == MAX_IPCDEVS)
                pr_err("too many SCU IPC devices");
@@ -335,10 +334,22 @@ static void __init sfi_handle_ipc_dev(struct sfi_device_table_entry *pentry,
 
        pr_debug("IPC bus, name = %16.16s, irq = 0x%2x\n",
                pentry->name, pentry->irq);
+
+       /*
+        * We need to call platform init of IPC devices to fill misc_pdata
+        * structure. It will be used in msic_init for initialization.
+        */
        pdata = intel_mid_sfi_get_pdata(dev, pentry);
        if (IS_ERR(pdata))
                return;
 
+       /*
+        * On Medfield the platform device creation is handled by the MSIC
+        * MFD driver so we don't need to do it here.
+        */
+       if (dev->msic && intel_mid_has_msic())
+               return;
+
        pdev = platform_device_alloc(pentry->name, 0);
        if (pdev == NULL) {
                pr_err("out of memory for SFI platform device '%s'.\n",
@@ -348,7 +359,10 @@ static void __init sfi_handle_ipc_dev(struct sfi_device_table_entry *pentry,
        install_irq_resource(pdev, pentry->irq);
 
        pdev->dev.platform_data = pdata;
-       platform_device_add(pdev);
+       if (dev->delay)
+               intel_scu_ipc_device_register(pdev);
+       else
+               platform_device_add(pdev);
 }
 
 static void __init sfi_handle_spi_dev(struct sfi_device_table_entry *pentry,
@@ -503,27 +517,23 @@ static int __init sfi_parse_devs(struct sfi_table_header *table)
                if (!dev)
                        continue;
 
-               if (dev->device_handler) {
-                       dev->device_handler(pentry, dev);
-               } else {
-                       switch (pentry->type) {
-                       case SFI_DEV_TYPE_IPC:
-                               sfi_handle_ipc_dev(pentry, dev);
-                               break;
-                       case SFI_DEV_TYPE_SPI:
-                               sfi_handle_spi_dev(pentry, dev);
-                               break;
-                       case SFI_DEV_TYPE_I2C:
-                               sfi_handle_i2c_dev(pentry, dev);
-                               break;
-                       case SFI_DEV_TYPE_SD:
-                               sfi_handle_sd_dev(pentry, dev);
-                               break;
-                       case SFI_DEV_TYPE_UART:
-                       case SFI_DEV_TYPE_HSI:
-                       default:
-                               break;
-                       }
+               switch (pentry->type) {
+               case SFI_DEV_TYPE_IPC:
+                       sfi_handle_ipc_dev(pentry, dev);
+                       break;
+               case SFI_DEV_TYPE_SPI:
+                       sfi_handle_spi_dev(pentry, dev);
+                       break;
+               case SFI_DEV_TYPE_I2C:
+                       sfi_handle_i2c_dev(pentry, dev);
+                       break;
+               case SFI_DEV_TYPE_SD:
+                       sfi_handle_sd_dev(pentry, dev);
+                       break;
+               case SFI_DEV_TYPE_UART:
+               case SFI_DEV_TYPE_HSI:
+               default:
+                       break;
                }
        }
        return 0;
index 8410e7d0a5b531dcc412a15f297255eb6a409579..9743d0ccfec69a3321de67bc7cfd77f1e411eef9 100644 (file)
@@ -45,8 +45,8 @@
  *
  * Handle system-wide NMI events generated by the global 'power nmi' command.
  *
- * Basic operation is to field the NMI interrupt on each cpu and wait
- * until all cpus have arrived into the nmi handler.  If some cpus do not
+ * Basic operation is to field the NMI interrupt on each CPU and wait
+ * until all CPU's have arrived into the nmi handler.  If some CPU's do not
  * make it into the handler, try and force them in with the IPI(NMI) signal.
  *
  * We also have to lessen UV Hub MMR accesses as much as possible as this
@@ -56,7 +56,7 @@
  * To do this we register our primary NMI notifier on the NMI_UNKNOWN
  * chain.  This reduces the number of false NMI calls when the perf
  * tools are running which generate an enormous number of NMIs per
- * second (~4M/s for 1024 cpu threads).  Our secondary NMI handler is
+ * second (~4M/s for 1024 CPU threads).  Our secondary NMI handler is
  * very short as it only checks that if it has been "pinged" with the
  * IPI(NMI) signal as mentioned above, and does not read the UV Hub's MMR.
  *
 static struct uv_hub_nmi_s **uv_hub_nmi_list;
 
 DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);
-EXPORT_PER_CPU_SYMBOL_GPL(uv_cpu_nmi);
 
+/* UV hubless values */
+#define NMI_CONTROL_PORT       0x70
+#define NMI_DUMMY_PORT         0x71
+#define PAD_OWN_GPP_D_0                0x2c
+#define GPI_NMI_STS_GPP_D_0    0x164
+#define GPI_NMI_ENA_GPP_D_0    0x174
+#define STS_GPP_D_0_MASK       0x1
+#define PAD_CFG_DW0_GPP_D_0    0x4c0
+#define GPIROUTNMI             (1ul << 17)
+#define PCH_PCR_GPIO_1_BASE    0xfdae0000ul
+#define PCH_PCR_GPIO_ADDRESS(offset) (int *)((u64)(pch_base) | (u64)(offset))
+
+static u64 *pch_base;
 static unsigned long nmi_mmr;
 static unsigned long nmi_mmr_clear;
 static unsigned long nmi_mmr_pending;
@@ -100,7 +112,7 @@ static int param_get_local64(char *buffer, const struct kernel_param *kp)
 
 static int param_set_local64(const char *val, const struct kernel_param *kp)
 {
-       /* clear on any write */
+       /* Clear on any write */
        local64_set((local64_t *)kp->arg, 0);
        return 0;
 }
@@ -144,16 +156,80 @@ module_param_named(wait_count, uv_nmi_wait_count, int, 0644);
 static int uv_nmi_retry_count = 500;
 module_param_named(retry_count, uv_nmi_retry_count, int, 0644);
 
-/*
- * Valid NMI Actions:
- *  "dump"     - dump process stack for each cpu
- *  "ips"      - dump IP info for each cpu
- *  "kdump"    - do crash dump
- *  "kdb"      - enter KDB (default)
- *  "kgdb"     - enter KGDB
- */
-static char uv_nmi_action[8] = "kdb";
-module_param_string(action, uv_nmi_action, sizeof(uv_nmi_action), 0644);
+static bool uv_pch_intr_enable = true;
+static bool uv_pch_intr_now_enabled;
+module_param_named(pch_intr_enable, uv_pch_intr_enable, bool, 0644);
+
+static bool uv_pch_init_enable = true;
+module_param_named(pch_init_enable, uv_pch_init_enable, bool, 0644);
+
+static int uv_nmi_debug;
+module_param_named(debug, uv_nmi_debug, int, 0644);
+
+#define nmi_debug(fmt, ...)                            \
+       do {                                            \
+               if (uv_nmi_debug)                       \
+                       pr_info(fmt, ##__VA_ARGS__);    \
+       } while (0)
+
+/* Valid NMI Actions */
+#define        ACTION_LEN      16
+static struct nmi_action {
+       char    *action;
+       char    *desc;
+} valid_acts[] = {
+       {       "kdump",        "do kernel crash dump"                  },
+       {       "dump",         "dump process stack for each cpu"       },
+       {       "ips",          "dump Inst Ptr info for each cpu"       },
+       {       "kdb",          "enter KDB (needs kgdboc= assignment)"  },
+       {       "kgdb",         "enter KGDB (needs gdb target remote)"  },
+       {       "health",       "check if CPUs respond to NMI"          },
+};
+typedef char action_t[ACTION_LEN];
+static action_t uv_nmi_action = { "dump" };
+
+static int param_get_action(char *buffer, const struct kernel_param *kp)
+{
+       return sprintf(buffer, "%s\n", uv_nmi_action);
+}
+
+static int param_set_action(const char *val, const struct kernel_param *kp)
+{
+       int i;
+       int n = ARRAY_SIZE(valid_acts);
+       char arg[ACTION_LEN], *p;
+
+       /* (remove possible '\n') */
+       strncpy(arg, val, ACTION_LEN - 1);
+       arg[ACTION_LEN - 1] = '\0';
+       p = strchr(arg, '\n');
+       if (p)
+               *p = '\0';
+
+       for (i = 0; i < n; i++)
+               if (!strcmp(arg, valid_acts[i].action))
+                       break;
+
+       if (i < n) {
+               strcpy(uv_nmi_action, arg);
+               pr_info("UV: New NMI action:%s\n", uv_nmi_action);
+               return 0;
+       }
+
+       pr_err("UV: Invalid NMI action:%s, valid actions are:\n", arg);
+       for (i = 0; i < n; i++)
+               pr_err("UV: %-8s - %s\n",
+                       valid_acts[i].action, valid_acts[i].desc);
+       return -EINVAL;
+}
+
+static const struct kernel_param_ops param_ops_action = {
+       .get = param_get_action,
+       .set = param_set_action,
+};
+#define param_check_action(name, p) __param_check(name, p, action_t)
+
+module_param_named(action, uv_nmi_action, action, 0644);
 
 static inline bool uv_nmi_action_is(const char *action)
 {
@@ -192,8 +268,200 @@ static inline void uv_local_mmr_clear_nmi(void)
 }
 
 /*
- * If first cpu in on this hub, set hub_nmi "in_nmi" and "owner" values and
- * return true.  If first cpu in on the system, set global "in_nmi" flag.
+ * UV hubless NMI handler functions
+ */
+static inline void uv_reassert_nmi(void)
+{
+       /* (from arch/x86/include/asm/mach_traps.h) */
+       outb(0x8f, NMI_CONTROL_PORT);
+       inb(NMI_DUMMY_PORT);            /* dummy read */
+       outb(0x0f, NMI_CONTROL_PORT);
+       inb(NMI_DUMMY_PORT);            /* dummy read */
+}
+
+static void uv_init_hubless_pch_io(int offset, int mask, int data)
+{
+       int *addr = PCH_PCR_GPIO_ADDRESS(offset);
+       int readd = readl(addr);
+
+       if (mask) {                     /* OR in new data */
+               int writed = (readd & ~mask) | data;
+
+               nmi_debug("UV:PCH: %p = %x & %x | %x (%x)\n",
+                       addr, readd, ~mask, data, writed);
+               writel(writed, addr);
+       } else if (readd & data) {      /* clear status bit */
+               nmi_debug("UV:PCH: %p = %x\n", addr, data);
+               writel(data, addr);
+       }
+
+       (void)readl(addr);              /* flush write data */
+}
+
+static void uv_nmi_setup_hubless_intr(void)
+{
+       uv_pch_intr_now_enabled = uv_pch_intr_enable;
+
+       uv_init_hubless_pch_io(
+               PAD_CFG_DW0_GPP_D_0, GPIROUTNMI,
+               uv_pch_intr_now_enabled ? GPIROUTNMI : 0);
+
+       nmi_debug("UV:NMI: GPP_D_0 interrupt %s\n",
+               uv_pch_intr_now_enabled ? "enabled" : "disabled");
+}
+
+static struct init_nmi {
+       unsigned int    offset;
+       unsigned int    mask;
+       unsigned int    data;
+} init_nmi[] = {
+       {       /* HOSTSW_OWN_GPP_D_0 */
+       .offset = 0x84,
+       .mask = 0x1,
+       .data = 0x0,    /* ACPI Mode */
+       },
+
+/* Clear status: */
+       {       /* GPI_INT_STS_GPP_D_0 */
+       .offset = 0x104,
+       .mask = 0x0,
+       .data = 0x1,    /* Clear Status */
+       },
+       {       /* GPI_GPE_STS_GPP_D_0 */
+       .offset = 0x124,
+       .mask = 0x0,
+       .data = 0x1,    /* Clear Status */
+       },
+       {       /* GPI_SMI_STS_GPP_D_0 */
+       .offset = 0x144,
+       .mask = 0x0,
+       .data = 0x1,    /* Clear Status */
+       },
+       {       /* GPI_NMI_STS_GPP_D_0 */
+       .offset = 0x164,
+       .mask = 0x0,
+       .data = 0x1,    /* Clear Status */
+       },
+
+/* Disable interrupts: */
+       {       /* GPI_INT_EN_GPP_D_0 */
+       .offset = 0x114,
+       .mask = 0x1,
+       .data = 0x0,    /* Disable interrupt generation */
+       },
+       {       /* GPI_GPE_EN_GPP_D_0 */
+       .offset = 0x134,
+       .mask = 0x1,
+       .data = 0x0,    /* Disable interrupt generation */
+       },
+       {       /* GPI_SMI_EN_GPP_D_0 */
+       .offset = 0x154,
+       .mask = 0x1,
+       .data = 0x0,    /* Disable interrupt generation */
+       },
+       {       /* GPI_NMI_EN_GPP_D_0 */
+       .offset = 0x174,
+       .mask = 0x1,
+       .data = 0x0,    /* Disable interrupt generation */
+       },
+
+/* Setup GPP_D_0 Pad Config: */
+       {       /* PAD_CFG_DW0_GPP_D_0 */
+       .offset = 0x4c0,
+       .mask = 0xffffffff,
+       .data = 0x82020100,
+/*
+ *  31:30 Pad Reset Config (PADRSTCFG): = 2h  # PLTRST# (default)
+ *
+ *  29    RX Pad State Select (RXPADSTSEL): = 0 # Raw RX pad state directly
+ *                                                from RX buffer (default)
+ *
+ *  28    RX Raw Override to '1' (RXRAW1): = 0 # No Override
+ *
+ *  26:25 RX Level/Edge Configuration (RXEVCFG):
+ *      = 0h # Level
+ *      = 1h # Edge
+ *
+ *  23    RX Invert (RXINV): = 0 # No Inversion (signal active high)
+ *
+ *  20    GPIO Input Route IOxAPIC (GPIROUTIOXAPIC):
+ * = 0 # Routing does not cause peripheral IRQ...
+ *     # (we want an NMI not an IRQ)
+ *
+ *  19    GPIO Input Route SCI (GPIROUTSCI): = 0 # Routing does not cause SCI.
+ *  18    GPIO Input Route SMI (GPIROUTSMI): = 0 # Routing does not cause SMI.
+ *  17    GPIO Input Route NMI (GPIROUTNMI): = 1 # Routing can cause NMI.
+ *
+ *  11:10 Pad Mode (PMODE1/0): = 0h = GPIO control the Pad.
+ *   9    GPIO RX Disable (GPIORXDIS):
+ * = 0 # Enable the input buffer (active low enable)
+ *
+ *   8    GPIO TX Disable (GPIOTXDIS):
+ * = 1 # Disable the output buffer; i.e. Hi-Z
+ *
+ *   1 GPIO RX State (GPIORXSTATE): This is the current internal RX pad state..
+ *   0 GPIO TX State (GPIOTXSTATE):
+ * = 0 # (Leave at default)
+ */
+       },
+
+/* Pad Config DW1 */
+       {       /* PAD_CFG_DW1_GPP_D_0 */
+       .offset = 0x4c4,
+       .mask = 0x3c00,
+       .data = 0,      /* Termination = none (default) */
+       },
+};
+
+static void uv_init_hubless_pch_d0(void)
+{
+       int i, read;
+
+       read = *PCH_PCR_GPIO_ADDRESS(PAD_OWN_GPP_D_0);
+       if (read != 0) {
+               pr_info("UV: Hubless NMI already configured\n");
+               return;
+       }
+
+       nmi_debug("UV: Initializing UV Hubless NMI on PCH\n");
+       for (i = 0; i < ARRAY_SIZE(init_nmi); i++) {
+               uv_init_hubless_pch_io(init_nmi[i].offset,
+                                       init_nmi[i].mask,
+                                       init_nmi[i].data);
+       }
+}
+
+static int uv_nmi_test_hubless(struct uv_hub_nmi_s *hub_nmi)
+{
+       int *pstat = PCH_PCR_GPIO_ADDRESS(GPI_NMI_STS_GPP_D_0);
+       int status = *pstat;
+
+       hub_nmi->nmi_value = status;
+       atomic_inc(&hub_nmi->read_mmr_count);
+
+       if (!(status & STS_GPP_D_0_MASK))       /* Not a UV external NMI */
+               return 0;
+
+       *pstat = STS_GPP_D_0_MASK;      /* Is a UV NMI: clear GPP_D_0 status */
+       (void)*pstat;                   /* Flush write */
+
+       return 1;
+}
+
+static int uv_test_nmi(struct uv_hub_nmi_s *hub_nmi)
+{
+       if (hub_nmi->hub_present)
+               return uv_nmi_test_mmr(hub_nmi);
+
+       if (hub_nmi->pch_owner)         /* Only PCH owner can check status */
+               return uv_nmi_test_hubless(hub_nmi);
+
+       return -1;
+}
+
+/*
+ * If first CPU in on this hub, set hub_nmi "in_nmi" and "owner" values and
+ * return true.  If first CPU in on the system, set global "in_nmi" flag.
  */
 static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi)
 {
@@ -214,6 +482,7 @@ static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
 {
        int cpu = smp_processor_id();
        int nmi = 0;
+       int nmi_detected = 0;
 
        local64_inc(&uv_nmi_count);
        this_cpu_inc(uv_cpu_nmi.queries);
@@ -224,35 +493,48 @@ static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
                        break;
 
                if (raw_spin_trylock(&hub_nmi->nmi_lock)) {
+                       nmi_detected = uv_test_nmi(hub_nmi);
 
-                       /* check hub MMR NMI flag */
-                       if (uv_nmi_test_mmr(hub_nmi)) {
+                       /* Check flag for UV external NMI */
+                       if (nmi_detected > 0) {
                                uv_set_in_nmi(cpu, hub_nmi);
                                nmi = 1;
                                break;
                        }
 
-                       /* MMR NMI flag is clear */
+                       /* A non-PCH node in a hubless system waits for NMI */
+                       else if (nmi_detected < 0)
+                               goto slave_wait;
+
+                       /* MMR/PCH NMI flag is clear */
                        raw_spin_unlock(&hub_nmi->nmi_lock);
 
                } else {
-                       /* wait a moment for the hub nmi locker to set flag */
-                       cpu_relax();
+
+                       /* Wait a moment for the HUB NMI locker to set flag */
+slave_wait:            cpu_relax();
                        udelay(uv_nmi_slave_delay);
 
-                       /* re-check hub in_nmi flag */
+                       /* Re-check hub in_nmi flag */
                        nmi = atomic_read(&hub_nmi->in_nmi);
                        if (nmi)
                                break;
                }
 
-               /* check if this BMC missed setting the MMR NMI flag */
+               /*
+                * Check if this BMC missed setting the MMR NMI flag (or)
+                * UV hubless system where only PCH owner can check flag
+                */
                if (!nmi) {
                        nmi = atomic_read(&uv_in_nmi);
                        if (nmi)
                                uv_set_in_nmi(cpu, hub_nmi);
                }
 
+               /* If we're holding the hub lock, release it now */
+               if (nmi_detected < 0)
+                       raw_spin_unlock(&hub_nmi->nmi_lock);
+
        } while (0);
 
        if (!nmi)
@@ -269,12 +551,15 @@ static inline void uv_clear_nmi(int cpu)
        if (cpu == atomic_read(&hub_nmi->cpu_owner)) {
                atomic_set(&hub_nmi->cpu_owner, -1);
                atomic_set(&hub_nmi->in_nmi, 0);
-               uv_local_mmr_clear_nmi();
+               if (hub_nmi->hub_present)
+                       uv_local_mmr_clear_nmi();
+               else
+                       uv_reassert_nmi();
                raw_spin_unlock(&hub_nmi->nmi_lock);
        }
 }
 
-/* Ping non-responding cpus attemping to force them into the NMI handler */
+/* Ping non-responding CPU's attempting to force them into the NMI handler */
 static void uv_nmi_nr_cpus_ping(void)
 {
        int cpu;
@@ -285,7 +570,7 @@ static void uv_nmi_nr_cpus_ping(void)
        apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
 }
 
-/* Clean up flags for cpus that ignored both NMI and ping */
+/* Clean up flags for CPU's that ignored both NMI and ping */
 static void uv_nmi_cleanup_mask(void)
 {
        int cpu;
@@ -297,11 +582,12 @@ static void uv_nmi_cleanup_mask(void)
        }
 }
 
-/* Loop waiting as cpus enter nmi handler */
+/* Loop waiting as CPU's enter NMI handler */
 static int uv_nmi_wait_cpus(int first)
 {
        int i, j, k, n = num_online_cpus();
        int last_k = 0, waiting = 0;
+       int cpu = smp_processor_id();
 
        if (first) {
                cpumask_copy(uv_nmi_cpu_mask, cpu_online_mask);
@@ -310,6 +596,12 @@ static int uv_nmi_wait_cpus(int first)
                k = n - cpumask_weight(uv_nmi_cpu_mask);
        }
 
+       /* PCH NMI causes only one CPU to respond */
+       if (first && uv_pch_intr_now_enabled) {
+               cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
+               return n - k - 1;
+       }
+
        udelay(uv_nmi_initial_delay);
        for (i = 0; i < uv_nmi_retry_count; i++) {
                int loop_delay = uv_nmi_loop_delay;
@@ -325,13 +617,13 @@ static int uv_nmi_wait_cpus(int first)
                        k = n;
                        break;
                }
-               if (last_k != k) {      /* abort if no new cpus coming in */
+               if (last_k != k) {      /* abort if no new CPU's coming in */
                        last_k = k;
                        waiting = 0;
                } else if (++waiting > uv_nmi_wait_count)
                        break;
 
-               /* extend delay if waiting only for cpu 0 */
+               /* Extend delay if waiting only for CPU 0: */
                if (waiting && (n - k) == 1 &&
                    cpumask_test_cpu(0, uv_nmi_cpu_mask))
                        loop_delay *= 100;
@@ -342,29 +634,29 @@ static int uv_nmi_wait_cpus(int first)
        return n - k;
 }
 
-/* Wait until all slave cpus have entered UV NMI handler */
+/* Wait until all slave CPU's have entered UV NMI handler */
 static void uv_nmi_wait(int master)
 {
-       /* indicate this cpu is in */
+       /* Indicate this CPU is in: */
        this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN);
 
-       /* if not the first cpu in (the master), then we are a slave cpu */
+       /* If not the first CPU in (the master), then we are a slave CPU */
        if (!master)
                return;
 
        do {
-               /* wait for all other cpus to gather here */
+               /* Wait for all other CPU's to gather here */
                if (!uv_nmi_wait_cpus(1))
                        break;
 
-               /* if not all made it in, send IPI NMI to them */
-               pr_alert("UV: Sending NMI IPI to %d non-responding CPUs: %*pbl\n",
+               /* If not all made it in, send IPI NMI to them */
+               pr_alert("UV: Sending NMI IPI to %d CPUs: %*pbl\n",
                         cpumask_weight(uv_nmi_cpu_mask),
                         cpumask_pr_args(uv_nmi_cpu_mask));
 
                uv_nmi_nr_cpus_ping();
 
-               /* if all cpus are in, then done */
+               /* If all CPU's are in, then done */
                if (!uv_nmi_wait_cpus(0))
                        break;
 
@@ -416,7 +708,7 @@ static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
        this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
 }
 
-/* Trigger a slave cpu to dump it's state */
+/* Trigger a slave CPU to dump its state */
 static void uv_nmi_trigger_dump(int cpu)
 {
        int retry = uv_nmi_trigger_delay;
@@ -437,7 +729,7 @@ static void uv_nmi_trigger_dump(int cpu)
        uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE;
 }
 
-/* Wait until all cpus ready to exit */
+/* Wait until all CPU's ready to exit */
 static void uv_nmi_sync_exit(int master)
 {
        atomic_dec(&uv_nmi_cpus_in_nmi);
@@ -451,7 +743,23 @@ static void uv_nmi_sync_exit(int master)
        }
 }
 
-/* Walk through cpu list and dump state of each */
+/* Current "health" check is to check which CPU's are responsive */
+static void uv_nmi_action_health(int cpu, struct pt_regs *regs, int master)
+{
+       if (master) {
+               int in = atomic_read(&uv_nmi_cpus_in_nmi);
+               int out = num_online_cpus() - in;
+
+               pr_alert("UV: NMI CPU health check (non-responding:%d)\n", out);
+               atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
+       } else {
+               while (!atomic_read(&uv_nmi_slave_continue))
+                       cpu_relax();
+       }
+       uv_nmi_sync_exit(master);
+}
+
+/* Walk through CPU list and dump state of each */
 static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
 {
        if (master) {
@@ -538,7 +846,7 @@ static inline int uv_nmi_kdb_reason(void)
 #else /* !CONFIG_KGDB_KDB */
 static inline int uv_nmi_kdb_reason(void)
 {
-       /* Insure user is expecting to attach gdb remote */
+       /* Ensure user is expecting to attach gdb remote */
        if (uv_nmi_action_is("kgdb"))
                return 0;
 
@@ -563,7 +871,7 @@ static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
                if (reason < 0)
                        return;
 
-               /* call KGDB NMI handler as MASTER */
+               /* Call KGDB NMI handler as MASTER */
                ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs, reason,
                                &uv_nmi_slave_continue);
                if (ret) {
@@ -571,7 +879,7 @@ static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
                        atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
                }
        } else {
-               /* wait for KGDB signal that it's ready for slaves to enter */
+               /* Wait for KGDB signal that it's ready for slaves to enter */
                int sig;
 
                do {
@@ -579,7 +887,7 @@ static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
                        sig = atomic_read(&uv_nmi_slave_continue);
                } while (!sig);
 
-               /* call KGDB as slave */
+               /* Call KGDB as slave */
                if (sig == SLAVE_CONTINUE)
                        kgdb_nmicallback(cpu, regs);
        }
@@ -623,18 +931,23 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
                        strncpy(uv_nmi_action, "dump", strlen(uv_nmi_action));
        }
 
-       /* Pause as all cpus enter the NMI handler */
+       /* Pause as all CPU's enter the NMI handler */
        uv_nmi_wait(master);
 
-       /* Dump state of each cpu */
-       if (uv_nmi_action_is("ips") || uv_nmi_action_is("dump"))
+       /* Process actions other than "kdump": */
+       if (uv_nmi_action_is("health")) {
+               uv_nmi_action_health(cpu, regs, master);
+       } else if (uv_nmi_action_is("ips") || uv_nmi_action_is("dump")) {
                uv_nmi_dump_state(cpu, regs, master);
-
-       /* Call KGDB/KDB if enabled */
-       else if (uv_nmi_action_is("kdb") || uv_nmi_action_is("kgdb"))
+       } else if (uv_nmi_action_is("kdb") || uv_nmi_action_is("kgdb")) {
                uv_call_kgdb_kdb(cpu, regs, master);
+       } else {
+               if (master)
+                       pr_alert("UV: unknown NMI action: %s\n", uv_nmi_action);
+               uv_nmi_sync_exit(master);
+       }
 
-       /* Clear per_cpu "in nmi" flag */
+       /* Clear per_cpu "in_nmi" flag */
        this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_OUT);
 
        /* Clear MMR NMI flag on each hub */
@@ -648,6 +961,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
                atomic_set(&uv_nmi_cpu, -1);
                atomic_set(&uv_in_nmi, 0);
                atomic_set(&uv_nmi_kexec_failed, 0);
+               atomic_set(&uv_nmi_slave_continue, SLAVE_CLEAR);
        }
 
        uv_nmi_touch_watchdogs();
@@ -657,7 +971,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
 }
 
 /*
- * NMI handler for pulling in CPUs when perf events are grabbing our NMI
+ * NMI handler for pulling in CPU's when perf events are grabbing our NMI
  */
 static int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
 {
@@ -690,35 +1004,62 @@ void uv_nmi_init(void)
        unsigned int value;
 
        /*
-        * Unmask NMI on all cpus
+        * Unmask NMI on all CPU's
         */
        value = apic_read(APIC_LVT1) | APIC_DM_NMI;
        value &= ~APIC_LVT_MASKED;
        apic_write(APIC_LVT1, value);
 }
 
-void uv_nmi_setup(void)
+/* Setup HUB NMI info */
+void __init uv_nmi_setup_common(bool hubbed)
 {
        int size = sizeof(void *) * (1 << NODES_SHIFT);
-       int cpu, nid;
+       int cpu;
 
-       /* Setup hub nmi info */
-       uv_nmi_setup_mmrs();
        uv_hub_nmi_list = kzalloc(size, GFP_KERNEL);
-       pr_info("UV: NMI hub list @ 0x%p (%d)\n", uv_hub_nmi_list, size);
+       nmi_debug("UV: NMI hub list @ 0x%p (%d)\n", uv_hub_nmi_list, size);
        BUG_ON(!uv_hub_nmi_list);
        size = sizeof(struct uv_hub_nmi_s);
        for_each_present_cpu(cpu) {
-               nid = cpu_to_node(cpu);
+               int nid = cpu_to_node(cpu);
                if (uv_hub_nmi_list[nid] == NULL) {
                        uv_hub_nmi_list[nid] = kzalloc_node(size,
                                                            GFP_KERNEL, nid);
                        BUG_ON(!uv_hub_nmi_list[nid]);
                        raw_spin_lock_init(&(uv_hub_nmi_list[nid]->nmi_lock));
                        atomic_set(&uv_hub_nmi_list[nid]->cpu_owner, -1);
+                       uv_hub_nmi_list[nid]->hub_present = hubbed;
+                       uv_hub_nmi_list[nid]->pch_owner = (nid == 0);
                }
                uv_hub_nmi_per(cpu) = uv_hub_nmi_list[nid];
        }
        BUG_ON(!alloc_cpumask_var(&uv_nmi_cpu_mask, GFP_KERNEL));
+}
+
+/* Setup for UV Hub systems */
+void __init uv_nmi_setup(void)
+{
+       uv_nmi_setup_mmrs();
+       uv_nmi_setup_common(true);
+       uv_register_nmi_notifier();
+       pr_info("UV: Hub NMI enabled\n");
+}
+
+/* Setup for UV Hubless systems */
+void __init uv_nmi_setup_hubless(void)
+{
+       uv_nmi_setup_common(false);
+       pch_base = xlate_dev_mem_ptr(PCH_PCR_GPIO_1_BASE);
+       nmi_debug("UV: PCH base:%p from 0x%lx, GPP_D_0\n",
+               pch_base, PCH_PCR_GPIO_1_BASE);
+       if (uv_pch_init_enable)
+               uv_init_hubless_pch_d0();
+       uv_init_hubless_pch_io(GPI_NMI_ENA_GPP_D_0,
+                               STS_GPP_D_0_MASK, STS_GPP_D_0_MASK);
+       uv_nmi_setup_hubless_intr();
+       /* Ensure NMI enabled in Processor Interface Reg: */
+       uv_reassert_nmi();
        uv_register_nmi_notifier();
+       pr_info("UV: Hubless NMI enabled\n");
 }
index d957d5f21a86563e5259cf6cca9228fb59646118..0bc60a30873088208ddf221dfaac8000890ee8ca 100644 (file)
@@ -1,6 +1,6 @@
 config MCE_AMD_INJ
        tristate "Simple MCE injection interface for AMD processors"
-       depends on RAS && EDAC_DECODE_MCE && DEBUG_FS && AMD_NB
+       depends on RAS && X86_MCE && DEBUG_FS && AMD_NB
        default n
        help
          This is a simple debugfs interface to inject MCEs and test different
index e8a9ea7d7a211f5f98f9563de6dc13085185b852..25a7c4302ce70390e81f98be0df2324a725364d5 100644 (file)
@@ -141,25 +141,6 @@ void __init xen_init_spinlocks(void)
        pv_lock_ops.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
 }
 
-/*
- * While the jump_label init code needs to happend _after_ the jump labels are
- * enabled and before SMP is started. Hence we use pre-SMP initcall level
- * init. We cannot do it in xen_init_spinlocks as that is done before
- * jump labels are activated.
- */
-static __init int xen_init_spinlocks_jump(void)
-{
-       if (!xen_pvspin)
-               return 0;
-
-       if (!xen_domain())
-               return 0;
-
-       static_key_slow_inc(&paravirt_ticketlocks_enabled);
-       return 0;
-}
-early_initcall(xen_init_spinlocks_jump);
-
 static __init int xen_parse_nopvspin(char *arg)
 {
        xen_pvspin = false;
index b7fbaa56b51a573f393ce6d6abb6ec5c1895c27e..9e9760b20be583689bc140a9559a6b83f49acd0c 100644 (file)
@@ -1,7 +1,6 @@
 generic-y += bitsperlong.h
 generic-y += bug.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += div64.h
 generic-y += dma-contiguous.h
 generic-y += emergency-restart.h
index 848e8568fb3c4a90c2eb89783c0420f8b5526cd6..8fd4be610607c2683b16a3e0da4249f4aea732e4 100644 (file)
@@ -419,7 +419,7 @@ subsys_initcall(topology_init);
 
 void cpu_reset(void)
 {
-#if XCHAL_HAVE_PTP_MMU
+#if XCHAL_HAVE_PTP_MMU && IS_ENABLED(CONFIG_MMU)
        local_irq_disable();
        /*
         * We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must
index f8c82a9b401222c84d5d014ac339439a1708caba..ed1e78e24db0053e993555e14bf730c1cfb868c2 100644 (file)
@@ -306,11 +306,6 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
        if (ret == 0 || (ret && ret != -EOPNOTSUPP))
                goto out;
 
-       ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
-                       ZERO_PAGE(0), biop);
-       if (ret == 0 || (ret && ret != -EOPNOTSUPP))
-               goto out;
-
        ret = 0;
        while (nr_sects != 0) {
                bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES),
@@ -369,6 +364,10 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                        return 0;
        }
 
+       if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
+                       ZERO_PAGE(0)))
+               return 0;
+
        blk_start_plug(&plug);
        ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
                        &bio, discard);
index c73a6fcaeb9d52c7e2aa1ebbcf725e86cdc707f4..838f07e2b64a207c0f4d46477a4ffb81d0e4a60b 100644 (file)
@@ -3758,7 +3758,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 }
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
-static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
+static bool check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
 {
        struct cfq_data *cfqd = cic_to_cfqd(cic);
        struct cfq_queue *cfqq;
@@ -3775,15 +3775,7 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
         * spuriously on a newly created cic but there's no harm.
         */
        if (unlikely(!cfqd) || likely(cic->blkcg_serial_nr == serial_nr))
-               return;
-
-       /*
-        * If we have a non-root cgroup, we can depend on that to
-        * do proper throttling of writes. Turn off wbt for that
-        * case, if it was enabled by default.
-        */
-       if (nonroot_cg)
-               wbt_disable_default(cfqd->queue);
+               return nonroot_cg;
 
        /*
         * Drop reference to queues.  New queues will be assigned in new
@@ -3804,9 +3796,13 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
        }
 
        cic->blkcg_serial_nr = serial_nr;
+       return nonroot_cg;
 }
 #else
-static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
+static inline bool check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
+{
+       return false;
+}
 #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
 
 static struct cfq_queue **
@@ -4448,11 +4444,12 @@ cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
        const int rw = rq_data_dir(rq);
        const bool is_sync = rq_is_sync(rq);
        struct cfq_queue *cfqq;
+       bool disable_wbt;
 
        spin_lock_irq(q->queue_lock);
 
        check_ioprio_changed(cic, bio);
-       check_blkcg_changed(cic, bio);
+       disable_wbt = check_blkcg_changed(cic, bio);
 new_queue:
        cfqq = cic_to_cfqq(cic, is_sync);
        if (!cfqq || cfqq == &cfqd->oom_cfqq) {
@@ -4488,6 +4485,10 @@ new_queue:
        rq->elv.priv[0] = cfqq;
        rq->elv.priv[1] = cfqq->cfqg;
        spin_unlock_irq(q->queue_lock);
+
+       if (disable_wbt)
+               wbt_disable_default(q);
+
        return 0;
 }
 
index df939b54b09f731eac02657957f3f573c51a1ec5..1fad2a6b3bbbf0d1d4ee07f585bdc4d501467b5d 100644 (file)
@@ -356,6 +356,7 @@ int crypto_register_alg(struct crypto_alg *alg)
        struct crypto_larval *larval;
        int err;
 
+       alg->cra_flags &= ~CRYPTO_ALG_DEAD;
        err = crypto_check_alg(alg);
        if (err)
                return err;
index f849311e9fd4c94e57d81ba97279ec5fb0cb0ded..533265f110e0297b9fc1e14a7a215a54d634b8d8 100644 (file)
@@ -661,9 +661,9 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
 unlock:
        list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
                af_alg_free_sg(&rsgl->sgl);
+               list_del(&rsgl->list);
                if (rsgl != &ctx->first_rsgl)
                        sock_kfree_s(sk, rsgl, sizeof(*rsgl));
-               list_del(&rsgl->list);
        }
        INIT_LIST_HEAD(&ctx->list);
        aead_wmem_wakeup(sk);
index 9ed087853deed4255907be620b477d12967920e4..a391bbc48105ae6cf3504775c60b00b4dcd5705e 100644 (file)
@@ -55,7 +55,7 @@ acpi-$(CONFIG_DEBUG_FS)               += debugfs.o
 acpi-$(CONFIG_ACPI_NUMA)       += numa.o
 acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
 acpi-y                         += acpi_lpat.o
-acpi-$(CONFIG_ACPI_GENERIC_GSI) += gsi.o
+acpi-$(CONFIG_ACPI_GENERIC_GSI) += irq.o
 acpi-$(CONFIG_ACPI_WATCHDOG)   += acpi_watchdog.o
 
 # These are (potentially) separate modules
index b3842ffc19ba20a210bec8dd8dc5f56cf9119de9..a15270a806fcab41bae160937cb09c11515056f2 100644 (file)
@@ -212,6 +212,7 @@ static bool __init extlog_get_l1addr(void)
 }
 static struct notifier_block extlog_mce_dec = {
        .notifier_call  = extlog_print,
+       .priority       = MCE_PRIO_EXTLOG,
 };
 
 static int __init extlog_init(void)
index 82b0b571097960919ce6ed36a703402422ac3cc7..b0399e8f6d27df774b175cb2a3aa7c8f3cce7189 100644 (file)
@@ -852,23 +852,18 @@ acpi_tb_install_and_load_table(acpi_physical_address address,
 
        ACPI_FUNCTION_TRACE(tb_install_and_load_table);
 
-       (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-
        /* Install the table and load it into the namespace */
 
        status = acpi_tb_install_standard_table(address, flags, TRUE,
                                                override, &i);
        if (ACPI_FAILURE(status)) {
-               goto unlock_and_exit;
+               goto exit;
        }
 
-       (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
        status = acpi_tb_load_table(i, acpi_gbl_root_node);
-       (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
 
-unlock_and_exit:
+exit:
        *table_index = i;
-       (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
        return_ACPI_STATUS(status);
 }
 
index 5fdf251a9f9797a2a00f479880d6449dfa6dd940..01e1b3d63fc0dc8ae0e0b17767dae535be2bec68 100644 (file)
@@ -217,6 +217,10 @@ acpi_tb_install_standard_table(acpi_physical_address address,
                goto release_and_exit;
        }
 
+       /* Acquire the table lock */
+
+       (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
        if (reload) {
                /*
                 * Validate the incoming table signature.
@@ -244,7 +248,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
                                         new_table_desc.signature.integer));
 
                        status = AE_BAD_SIGNATURE;
-                       goto release_and_exit;
+                       goto unlock_and_exit;
                }
 
                /* Check if table is already registered */
@@ -279,7 +283,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
                                /* Table is still loaded, this is an error */
 
                                status = AE_ALREADY_EXISTS;
-                               goto release_and_exit;
+                               goto unlock_and_exit;
                        } else {
                                /*
                                 * Table was unloaded, allow it to be reloaded.
@@ -290,6 +294,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
                                 * indicate the re-installation.
                                 */
                                acpi_tb_uninstall_table(&new_table_desc);
+                               (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
                                *table_index = i;
                                return_ACPI_STATUS(AE_OK);
                        }
@@ -303,11 +308,19 @@ acpi_tb_install_standard_table(acpi_physical_address address,
 
        /* Invoke table handler if present */
 
+       (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
        if (acpi_gbl_table_handler) {
                (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_INSTALL,
                                             new_table_desc.pointer,
                                             acpi_gbl_table_handler_context);
        }
+       (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
+unlock_and_exit:
+
+       /* Release the table lock */
+
+       (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
 
 release_and_exit:
 
index e0d2e6e6e40caf03b7c1bf4712664071e433becf..3752521c62aba1fc71700646f53ca241f305fda4 100644 (file)
@@ -536,7 +536,7 @@ static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
                if (!iort_fwnode)
                        return NULL;
 
-               ops = iommu_get_instance(iort_fwnode);
+               ops = iommu_ops_from_fwnode(iort_fwnode);
                if (!ops)
                        return NULL;
 
index 75f128e766a979e38a3a91ca00ec8fbde5b686dc..ca28aa572aa9543f5fc148221fc5b902c6fdfd02 100644 (file)
 #include <linux/sysfs.h>
 #include <linux/efi-bgrt.h>
 
+static void *bgrt_image;
 static struct kobject *bgrt_kobj;
 
 static ssize_t show_version(struct device *dev,
                            struct device_attribute *attr, char *buf)
 {
-       return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->version);
+       return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.version);
 }
 static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
 
 static ssize_t show_status(struct device *dev,
                           struct device_attribute *attr, char *buf)
 {
-       return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->status);
+       return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.status);
 }
 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
 
 static ssize_t show_type(struct device *dev,
                         struct device_attribute *attr, char *buf)
 {
-       return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->image_type);
+       return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_type);
 }
 static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
 
 static ssize_t show_xoffset(struct device *dev,
                            struct device_attribute *attr, char *buf)
 {
-       return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->image_offset_x);
+       return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_offset_x);
 }
 static DEVICE_ATTR(xoffset, S_IRUGO, show_xoffset, NULL);
 
 static ssize_t show_yoffset(struct device *dev,
                            struct device_attribute *attr, char *buf)
 {
-       return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->image_offset_y);
+       return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_offset_y);
 }
 static DEVICE_ATTR(yoffset, S_IRUGO, show_yoffset, NULL);
 
@@ -84,15 +85,24 @@ static int __init bgrt_init(void)
 {
        int ret;
 
-       if (!bgrt_image)
+       if (!bgrt_tab.image_address)
                return -ENODEV;
 
+       bgrt_image = memremap(bgrt_tab.image_address, bgrt_image_size,
+                             MEMREMAP_WB);
+       if (!bgrt_image) {
+               pr_notice("Ignoring BGRT: failed to map image memory\n");
+               return -ENOMEM;
+       }
+
        bin_attr_image.private = bgrt_image;
        bin_attr_image.size = bgrt_image_size;
 
        bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
-       if (!bgrt_kobj)
-               return -EINVAL;
+       if (!bgrt_kobj) {
+               ret = -EINVAL;
+               goto out_memmap;
+       }
 
        ret = sysfs_create_group(bgrt_kobj, &bgrt_attribute_group);
        if (ret)
@@ -102,6 +112,8 @@ static int __init bgrt_init(void)
 
 out_kobject:
        kobject_put(bgrt_kobj);
+out_memmap:
+       memunmap(bgrt_image);
        return ret;
 }
 device_initcall(bgrt_init);
diff --git a/drivers/acpi/gsi.c b/drivers/acpi/gsi.c
deleted file mode 100644 (file)
index ee9e0f2..0000000
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * ACPI GSI IRQ layer
- *
- * Copyright (C) 2015 ARM Ltd.
- * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/acpi.h>
-#include <linux/irq.h>
-#include <linux/irqdomain.h>
-#include <linux/of.h>
-
-enum acpi_irq_model_id acpi_irq_model;
-
-static struct fwnode_handle *acpi_gsi_domain_id;
-
-/**
- * acpi_gsi_to_irq() - Retrieve the linux irq number for a given GSI
- * @gsi: GSI IRQ number to map
- * @irq: pointer where linux IRQ number is stored
- *
- * irq location updated with irq value [>0 on success, 0 on failure]
- *
- * Returns: linux IRQ number on success (>0)
- *          -EINVAL on failure
- */
-int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
-{
-       struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id,
-                                                       DOMAIN_BUS_ANY);
-
-       *irq = irq_find_mapping(d, gsi);
-       /*
-        * *irq == 0 means no mapping, that should
-        * be reported as a failure
-        */
-       return (*irq > 0) ? *irq : -EINVAL;
-}
-EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
-
-/**
- * acpi_register_gsi() - Map a GSI to a linux IRQ number
- * @dev: device for which IRQ has to be mapped
- * @gsi: GSI IRQ number
- * @trigger: trigger type of the GSI number to be mapped
- * @polarity: polarity of the GSI to be mapped
- *
- * Returns: a valid linux IRQ number on success
- *          -EINVAL on failure
- */
-int acpi_register_gsi(struct device *dev, u32 gsi, int trigger,
-                     int polarity)
-{
-       struct irq_fwspec fwspec;
-
-       if (WARN_ON(!acpi_gsi_domain_id)) {
-               pr_warn("GSI: No registered irqchip, giving up\n");
-               return -EINVAL;
-       }
-
-       fwspec.fwnode = acpi_gsi_domain_id;
-       fwspec.param[0] = gsi;
-       fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity);
-       fwspec.param_count = 2;
-
-       return irq_create_fwspec_mapping(&fwspec);
-}
-EXPORT_SYMBOL_GPL(acpi_register_gsi);
-
-/**
- * acpi_unregister_gsi() - Free a GSI<->linux IRQ number mapping
- * @gsi: GSI IRQ number
- */
-void acpi_unregister_gsi(u32 gsi)
-{
-       struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id,
-                                                       DOMAIN_BUS_ANY);
-       int irq = irq_find_mapping(d, gsi);
-
-       irq_dispose_mapping(irq);
-}
-EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
-
-/**
- * acpi_set_irq_model - Setup the GSI irqdomain information
- * @model: the value assigned to acpi_irq_model
- * @fwnode: the irq_domain identifier for mapping and looking up
- *          GSI interrupts
- */
-void __init acpi_set_irq_model(enum acpi_irq_model_id model,
-                              struct fwnode_handle *fwnode)
-{
-       acpi_irq_model = model;
-       acpi_gsi_domain_id = fwnode;
-}
diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c
new file mode 100644 (file)
index 0000000..830299a
--- /dev/null
@@ -0,0 +1,297 @@
+/*
+ * ACPI GSI IRQ layer
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/acpi.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+
+enum acpi_irq_model_id acpi_irq_model;
+
+static struct fwnode_handle *acpi_gsi_domain_id;
+
+/**
+ * acpi_gsi_to_irq() - Retrieve the linux irq number for a given GSI
+ * @gsi: GSI IRQ number to map
+ * @irq: pointer where linux IRQ number is stored
+ *
+ * irq location updated with irq value [>0 on success, 0 on failure]
+ *
+ * Returns: linux IRQ number on success (>0)
+ *          -EINVAL on failure
+ */
+int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
+{
+       struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id,
+                                                       DOMAIN_BUS_ANY);
+
+       *irq = irq_find_mapping(d, gsi);
+       /*
+        * *irq == 0 means no mapping, that should
+        * be reported as a failure
+        */
+       return (*irq > 0) ? *irq : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
+
+/**
+ * acpi_register_gsi() - Map a GSI to a linux IRQ number
+ * @dev: device for which IRQ has to be mapped
+ * @gsi: GSI IRQ number
+ * @trigger: trigger type of the GSI number to be mapped
+ * @polarity: polarity of the GSI to be mapped
+ *
+ * Returns: a valid linux IRQ number on success
+ *          -EINVAL on failure
+ */
+int acpi_register_gsi(struct device *dev, u32 gsi, int trigger,
+                     int polarity)
+{
+       struct irq_fwspec fwspec;
+
+       if (WARN_ON(!acpi_gsi_domain_id)) {
+               pr_warn("GSI: No registered irqchip, giving up\n");
+               return -EINVAL;
+       }
+
+       fwspec.fwnode = acpi_gsi_domain_id;
+       fwspec.param[0] = gsi;
+       fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity);
+       fwspec.param_count = 2;
+
+       return irq_create_fwspec_mapping(&fwspec);
+}
+EXPORT_SYMBOL_GPL(acpi_register_gsi);
+
+/**
+ * acpi_unregister_gsi() - Free a GSI<->linux IRQ number mapping
+ * @gsi: GSI IRQ number
+ */
+void acpi_unregister_gsi(u32 gsi)
+{
+       struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id,
+                                                       DOMAIN_BUS_ANY);
+       int irq = irq_find_mapping(d, gsi);
+
+       irq_dispose_mapping(irq);
+}
+EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
+
+/**
+ * acpi_get_irq_source_fwhandle() - Retrieve fwhandle from IRQ resource source.
+ * @source: acpi_resource_source to use for the lookup.
+ *
+ * Description:
+ * Retrieve the fwhandle of the device referenced by the given IRQ resource
+ * source.
+ *
+ * Return:
+ * The referenced device fwhandle or NULL on failure
+ */
+static struct fwnode_handle *
+acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source)
+{
+       struct fwnode_handle *result;
+       struct acpi_device *device;
+       acpi_handle handle;
+       acpi_status status;
+
+       if (!source->string_length)
+               return acpi_gsi_domain_id;
+
+       status = acpi_get_handle(NULL, source->string_ptr, &handle);
+       if (WARN_ON(ACPI_FAILURE(status)))
+               return NULL;
+
+       device = acpi_bus_get_acpi_device(handle);
+       if (WARN_ON(!device))
+               return NULL;
+
+       result = &device->fwnode;
+       acpi_bus_put_acpi_device(device);
+       return result;
+}
+
+/*
+ * Context for the resource walk used to lookup IRQ resources.
+ * Contains a return code, the lookup index, and references to the flags
+ * and fwspec where the result is returned.
+ */
+struct acpi_irq_parse_one_ctx {
+       int rc;
+       unsigned int index;
+       unsigned long *res_flags;
+       struct irq_fwspec *fwspec;
+};
+
+/**
+ * acpi_irq_parse_one_match - Handle a matching IRQ resource.
+ * @fwnode: matching fwnode
+ * @hwirq: hardware IRQ number
+ * @triggering: triggering attributes of hwirq
+ * @polarity: polarity attributes of hwirq
+ * @shareable: shareable attributes of hwirq
+ * @ctx: acpi_irq_parse_one_ctx updated by this function
+ *
+ * Description:
+ * Handle a matching IRQ resource by populating the given ctx with
+ * the information passed.
+ */
+static inline void acpi_irq_parse_one_match(struct fwnode_handle *fwnode,
+                                           u32 hwirq, u8 triggering,
+                                           u8 polarity, u8 shareable,
+                                           struct acpi_irq_parse_one_ctx *ctx)
+{
+       if (!fwnode)
+               return;
+       ctx->rc = 0;
+       *ctx->res_flags = acpi_dev_irq_flags(triggering, polarity, shareable);
+       ctx->fwspec->fwnode = fwnode;
+       ctx->fwspec->param[0] = hwirq;
+       ctx->fwspec->param[1] = acpi_dev_get_irq_type(triggering, polarity);
+       ctx->fwspec->param_count = 2;
+}
+
+/**
+ * acpi_irq_parse_one_cb - Handle the given resource.
+ * @ares: resource to handle
+ * @context: context for the walk
+ *
+ * Description:
+ * This is called by acpi_walk_resources passing each resource returned by
+ * the _CRS method. We only inspect IRQ resources. Since IRQ resources
+ * might contain multiple interrupts we check if the index is within this
+ * one's interrupt array, otherwise we subtract the current resource IRQ
+ * count from the lookup index to prepare for the next resource.
+ * Once a match is found we call acpi_irq_parse_one_match to populate
+ * the result and end the walk by returning AE_CTRL_TERMINATE.
+ *
+ * Return:
+ * AE_OK if the walk should continue, AE_CTRL_TERMINATE if a matching
+ * IRQ resource was found.
+ */
+static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares,
+                                        void *context)
+{
+       struct acpi_irq_parse_one_ctx *ctx = context;
+       struct acpi_resource_irq *irq;
+       struct acpi_resource_extended_irq *eirq;
+       struct fwnode_handle *fwnode;
+
+       switch (ares->type) {
+       case ACPI_RESOURCE_TYPE_IRQ:
+               irq = &ares->data.irq;
+               if (ctx->index >= irq->interrupt_count) {
+                       ctx->index -= irq->interrupt_count;
+                       return AE_OK;
+               }
+               fwnode = acpi_gsi_domain_id;
+               acpi_irq_parse_one_match(fwnode, irq->interrupts[ctx->index],
+                                        irq->triggering, irq->polarity,
+                                        irq->sharable, ctx);
+               return AE_CTRL_TERMINATE;
+       case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+               eirq = &ares->data.extended_irq;
+               if (eirq->producer_consumer == ACPI_PRODUCER)
+                       return AE_OK;
+               if (ctx->index >= eirq->interrupt_count) {
+                       ctx->index -= eirq->interrupt_count;
+                       return AE_OK;
+               }
+               fwnode = acpi_get_irq_source_fwhandle(&eirq->resource_source);
+               acpi_irq_parse_one_match(fwnode, eirq->interrupts[ctx->index],
+                                        eirq->triggering, eirq->polarity,
+                                        eirq->sharable, ctx);
+               return AE_CTRL_TERMINATE;
+       }
+
+       return AE_OK;
+}
+
+/**
+ * acpi_irq_parse_one - Resolve an interrupt for a device
+ * @handle: the device whose interrupt is to be resolved
+ * @index: index of the interrupt to resolve
+ * @fwspec: structure irq_fwspec filled by this function
+ * @flags: resource flags filled by this function
+ *
+ * Description:
+ * Resolves an interrupt for a device by walking its CRS resources to find
+ * the appropriate ACPI IRQ resource and populating the given struct irq_fwspec
+ * and flags.
+ *
+ * Return:
+ * The result stored in ctx.rc by the callback, or the default -EINVAL value
+ * if an error occurs.
+ */
+static int acpi_irq_parse_one(acpi_handle handle, unsigned int index,
+                             struct irq_fwspec *fwspec, unsigned long *flags)
+{
+       struct acpi_irq_parse_one_ctx ctx = { -EINVAL, index, flags, fwspec };
+
+       acpi_walk_resources(handle, METHOD_NAME__CRS, acpi_irq_parse_one_cb, &ctx);
+       return ctx.rc;
+}
+
+/**
+ * acpi_irq_get - Lookup an ACPI IRQ resource and use it to initialize resource.
+ * @handle: ACPI device handle
+ * @index:  ACPI IRQ resource index to lookup
+ * @res:    Linux IRQ resource to initialize
+ *
+ * Description:
+ * Look for the ACPI IRQ resource with the given index and use it to initialize
+ * the given Linux IRQ resource.
+ *
+ * Return:
+ * 0 on success
+ * -EINVAL if an error occurs
+ * -EPROBE_DEFER if the IRQ lookup/conversion failed
+ */
+int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res)
+{
+       struct irq_fwspec fwspec;
+       struct irq_domain *domain;
+       unsigned long flags;
+       int rc;
+
+       rc = acpi_irq_parse_one(handle, index, &fwspec, &flags);
+       if (rc)
+               return rc;
+
+       domain = irq_find_matching_fwnode(fwspec.fwnode, DOMAIN_BUS_ANY);
+       if (!domain)
+               return -EPROBE_DEFER;
+
+       rc = irq_create_fwspec_mapping(&fwspec);
+       if (rc <= 0)
+               return -EINVAL;
+
+       res->start = rc;
+       res->end = rc;
+       res->flags = flags;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_irq_get);
+
+/**
+ * acpi_set_irq_model - Setup the GSI irqdomain information
+ * @model: the value assigned to acpi_irq_model
+ * @fwnode: the irq_domain identifier for mapping and looking up
+ *          GSI interrupts
+ */
+void __init acpi_set_irq_model(enum acpi_irq_model_id model,
+                              struct fwnode_handle *fwnode)
+{
+       acpi_irq_model = model;
+       acpi_gsi_domain_id = fwnode;
+}
index 2f82b8eba360e7f369338b7d7a340060d6519f4f..7361d00818e2bb61f5d280c6817db2a2e8d01fc4 100644 (file)
@@ -2704,6 +2704,7 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
        struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
        struct device *dev = acpi_desc->dev;
        struct acpi_nfit_flush_work flush;
+       int rc;
 
        /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
        device_lock(dev);
@@ -2716,7 +2717,10 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
        INIT_WORK_ONSTACK(&flush.work, flush_probe);
        COMPLETION_INITIALIZER_ONSTACK(flush.cmp);
        queue_work(nfit_wq, &flush.work);
-       return wait_for_completion_interruptible(&flush.cmp);
+
+       rc = wait_for_completion_interruptible(&flush.cmp);
+       cancel_work_sync(&flush.work);
+       return rc;
 }
 
 static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
index e5ce81c38eed4bbeea0bcbc380c77ba66d0b097c..3ba1c3472cf9e293ae70fd7cdc649eeb96abf1a0 100644 (file)
@@ -90,6 +90,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
 
 static struct notifier_block nfit_mce_dec = {
        .notifier_call  = nfit_handle_mce,
+       .priority       = MCE_PRIO_NFIT,
 };
 
 void nfit_mce_register(void)
index f0b4a981b8d38250f4954b7ef26d2edba5c8b2e1..18b72eec350764813dcde187cdd665e3d21ba191 100644 (file)
@@ -75,10 +75,8 @@ static int acpi_processor_ppc_notifier(struct notifier_block *nb,
        struct acpi_processor *pr;
        unsigned int ppc = 0;
 
-       if (event == CPUFREQ_START && ignore_ppc <= 0) {
+       if (ignore_ppc < 0)
                ignore_ppc = 0;
-               return 0;
-       }
 
        if (ignore_ppc)
                return 0;
index cb57962ef7c4560793842ca271bfd1eb5650b422..8b11d6d385dcd22a75f9f8bfdf9bd81856bf87ba 100644 (file)
@@ -43,6 +43,19 @@ static inline bool
 acpi_iospace_resource_valid(struct resource *res) { return true; }
 #endif
 
+#if IS_ENABLED(CONFIG_ACPI_GENERIC_GSI)
+static inline bool is_gsi(struct acpi_resource_extended_irq *ext_irq)
+{
+       return ext_irq->resource_source.string_length == 0 &&
+              ext_irq->producer_consumer == ACPI_CONSUMER;
+}
+#else
+static inline bool is_gsi(struct acpi_resource_extended_irq *ext_irq)
+{
+       return true;
+}
+#endif
+
 static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
 {
        u64 reslen = end - start + 1;
@@ -470,9 +483,12 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
                        acpi_dev_irqresource_disabled(res, 0);
                        return false;
                }
-               acpi_dev_get_irqresource(res, ext_irq->interrupts[index],
+               if (is_gsi(ext_irq))
+                       acpi_dev_get_irqresource(res, ext_irq->interrupts[index],
                                         ext_irq->triggering, ext_irq->polarity,
                                         ext_irq->sharable, false);
+               else
+                       acpi_dev_irqresource_disabled(res, 0);
                break;
        default:
                res->flags = 0;
index 9b6cebe227a08562985ce304463dcbe1f1ee1154..54abb26b736639ca54aa7051ae742d6657a501bc 100644 (file)
@@ -674,14 +674,6 @@ static void acpi_sleep_suspend_setup(void)
                if (acpi_sleep_state_supported(i))
                        sleep_states[i] = 1;
 
-       /*
-        * Use suspend-to-idle by default if ACPI_FADT_LOW_POWER_S0 is set and
-        * the default suspend mode was not selected from the command line.
-        */
-       if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0 &&
-           mem_sleep_default > PM_SUSPEND_MEM)
-               mem_sleep_default = PM_SUSPEND_FREEZE;
-
        suspend_set_ops(old_suspend_ordering ?
                &acpi_suspend_ops_old : &acpi_suspend_ops);
        freeze_set_ops(&acpi_freeze_ops);
index 02ded25c82e4a06e1e79bf2f0a4855aa933b3df1..7f48156cbc0c0b47a22943b60bf374d8a86ea6e3 100644 (file)
@@ -305,17 +305,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
                DMI_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L702X"),
                },
        },
-       {
-       /* https://bugzilla.redhat.com/show_bug.cgi?id=1204476 */
-       /* https://bugs.launchpad.net/ubuntu/+source/linux-lts-trusty/+bug/1416940 */
-       .callback = video_detect_force_native,
-       .ident = "HP Pavilion dv6",
-       .matches = {
-               DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
-               DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv6 Notebook PC"),
-               },
-       },
-
        { },
 };
 
index 9cd0a2d4181699d94f73f2af82490e59f23373e2..c2d3785ec2279f42013cdc4816beb60785279d95 100644 (file)
@@ -1702,6 +1702,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
 
                if (qc->err_mask & ~AC_ERR_OTHER)
                        qc->err_mask &= ~AC_ERR_OTHER;
+       } else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
+               qc->result_tf.command |= ATA_SENSE;
        }
 
        /* finish up */
@@ -4356,10 +4358,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "ST380013AS",         "3.20",         ATA_HORKAGE_MAX_SEC_1024 },
 
        /*
-        * Device times out with higher max sects.
+        * These devices time out with higher max sects.
         * https://bugzilla.kernel.org/show_bug.cgi?id=121671
         */
-       { "LITEON CX1-JB256-HP", NULL,          ATA_HORKAGE_MAX_SEC_1024 },
+       { "LITEON CX1-JB*-HP",  NULL,           ATA_HORKAGE_MAX_SEC_1024 },
 
        /* Devices we expect to fail diagnostics */
 
index 823e938c9a7877a1cadefde9127d447832630061..2f32782cea6d9c584797d1f7d9dc8e99eac0b796 100644 (file)
@@ -4132,6 +4132,9 @@ static int mv_platform_probe(struct platform_device *pdev)
        host->iomap = NULL;
        hpriv->base = devm_ioremap(&pdev->dev, res->start,
                                   resource_size(res));
+       if (!hpriv->base)
+               return -ENOMEM;
+
        hpriv->base -= SATAHC0_REG_BASE;
 
        hpriv->clk = clk_get(&pdev->dev, NULL);
index 4c28e1a0978666e3f228a005661981e44205bd49..2c3b359b3536a15cdb21fbc60af80644368c655d 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/of.h>
 #include <linux/cpufeature.h>
 #include <linux/tick.h>
+#include <linux/pm_qos.h>
 
 #include "base.h"
 
@@ -376,6 +377,7 @@ int register_cpu(struct cpu *cpu, int num)
 
        per_cpu(cpu_sys_devices, num) = &cpu->dev;
        register_cpu_under_node(num, cpu_to_node(num));
+       dev_pm_qos_expose_latency_limit(&cpu->dev, 0);
 
        return 0;
 }
index 4497d263209fb861e08e1650ddf654a98f2102e3..ac350c518e0c9479c05c4d9ff9f6ae918f26b96c 100644 (file)
@@ -558,9 +558,6 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
        struct firmware_buf *buf = fw_priv->buf;
 
        __fw_load_abort(buf);
-
-       /* avoid user action after loading abort */
-       fw_priv->buf = NULL;
 }
 
 static LIST_HEAD(pending_fw_head);
@@ -713,7 +710,7 @@ static ssize_t firmware_loading_store(struct device *dev,
 
        mutex_lock(&fw_lock);
        fw_buf = fw_priv->buf;
-       if (!fw_buf)
+       if (fw_state_is_aborted(&fw_buf->fw_st))
                goto out;
 
        switch (loading) {
index 8ab8ea1253e62310a68d9e6bf039d8d866ee4019..fa26ffd25fa61bae95bd441699a54ee8e16818d2 100644 (file)
@@ -389,33 +389,33 @@ static ssize_t show_valid_zones(struct device *dev,
 {
        struct memory_block *mem = to_memory_block(dev);
        unsigned long start_pfn, end_pfn;
+       unsigned long valid_start, valid_end, valid_pages;
        unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
-       struct page *first_page;
        struct zone *zone;
        int zone_shift = 0;
 
        start_pfn = section_nr_to_pfn(mem->start_section_nr);
        end_pfn = start_pfn + nr_pages;
-       first_page = pfn_to_page(start_pfn);
 
        /* The block contains more than one zone can not be offlined. */
-       if (!test_pages_in_a_zone(start_pfn, end_pfn))
+       if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
                return sprintf(buf, "none\n");
 
-       zone = page_zone(first_page);
+       zone = page_zone(pfn_to_page(valid_start));
+       valid_pages = valid_end - valid_start;
 
        /* MMOP_ONLINE_KEEP */
        sprintf(buf, "%s", zone->name);
 
        /* MMOP_ONLINE_KERNEL */
-       zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL);
+       zone_can_shift(valid_start, valid_pages, ZONE_NORMAL, &zone_shift);
        if (zone_shift) {
                strcat(buf, " ");
                strcat(buf, (zone + zone_shift)->name);
        }
 
        /* MMOP_ONLINE_MOVABLE */
-       zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE);
+       zone_can_shift(valid_start, valid_pages, ZONE_MOVABLE, &zone_shift);
        if (zone_shift) {
                strcat(buf, " ");
                strcat(buf, (zone + zone_shift)->name);
index be6a599bc0c1a8da08d0a20f31d7bc054dd1a972..0fc7c4da77563226ad0a36a6fc7dbb8a26948793 100644 (file)
@@ -206,7 +206,7 @@ platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec,
 {
        struct platform_msi_priv_data *datap;
        /*
-        * Limit the number of interrupts to 256 per device. Should we
+        * Limit the number of interrupts to 2048 per device. Should we
         * need to bump this up, DEV_ID_SHIFT should be adjusted
         * accordingly (which would impact the max number of MSI
         * capable devices).
index c4af00385502ffd9039bc73d1eccc2eedef33ce4..647e4761dbf3fad15392f6d5452c63307113c592 100644 (file)
@@ -102,6 +102,16 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
        }
 
        r = platform_get_resource(dev, IORESOURCE_IRQ, num);
+       if (has_acpi_companion(&dev->dev)) {
+               if (r && r->flags & IORESOURCE_DISABLED) {
+                       int ret;
+
+                       ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
+                       if (ret)
+                               return ret;
+               }
+       }
+
        /*
         * The resources may pass trigger flags to the irqs that need
         * to be set up. It so happens that the trigger flags for
index 2997026b4dfb00c1daec88f1f18264656a61ffb0..e697dec9d25bf585175a5ee569097f849f9669c6 100644 (file)
@@ -130,7 +130,7 @@ static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
 
        ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
 
-       /* Warn once for each IRQ safe dev in no sleep domain */
+       /* Warn once if IRQ safe dev in no sleep domain */
        if (ret)
                dev_warn_once(dev, "PM domain %s will not be powered off\n",
                                genpd->name);
@@ -201,7 +201,7 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
        smp_mb__after_atomic();
 }
 
-static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
+static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
 {
        unsigned int state_idx = genpd->state_idx;
        ktime_t time_start;
@@ -231,7 +231,7 @@ static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
        return ret;
 }
 
-static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
+static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
 {
        unsigned int state_idx = genpd->state_idx;
        ktime_t time_start;
@@ -262,10 +262,10 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
 }
 
 /**
- * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff().
+ * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
  * @genpd: PM domain to power off.
  *
- * Queue up the execution of genpd_poweroff() unless it's already been done
+ * Queue up the execution of genpd_power_off() unless it's already been done
  * before.
  */
 static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
@@ -274,14 +274,101 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
 }
 
 /**
- * genpd_poweron - Restore power to a given PM domain and its masters.
+ * genpd_power_off - Remove power from a given PM domain.
+ * @genpd: PM domain to power down.
+ * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
+ * RPM status of the releated device is in an intermediate state, not yet turned
+ * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
+ * be RPM_SUSPENDED, while it tries to power off the PM domain.
+ *
+ * If all of the @genpd's devices have been suspended and all of its subdomains
+ * have been powered down, remove power from @genpd.
+ */
+static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
+                          unsigned int depth)
+{
+       struct pm_domain_data *pdd;
+       struct gpd_link *link;
+       unsigned int not_suspended = 0;
+
+       /*
+        * Do not try to power off the domain in the following situations:
+        * (1) The domain is already in the "power off" state.
+        * (2) System suspend is in progress.
+        */
+       if (genpd->status == GPD_STATE_POWER_OFF
+           || genpd->prepared_count > 0)
+               return 0;
+
+       if (atomic_read(&genpd->sd_count) > 0)
+               return -EBUSY;
+
+       list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+               enum pm_qos_flags_status stat;
+
+               stat = dev_pm_qos_flags(pdd->dev,
+                                       PM_QOS_FLAG_NO_POWER_OFF
+                                               | PM_QOS_FLAG_REMOTE_WAKEUP);
+               if (stat > PM_QOS_FLAGS_NONE)
+                       return -EBUSY;
+
+               /*
+                * Do not allow PM domain to be powered off, when an IRQ safe
+                * device is part of a non-IRQ safe domain.
+                */
+               if (!pm_runtime_suspended(pdd->dev) ||
+                       irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
+                       not_suspended++;
+       }
+
+       if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
+               return -EBUSY;
+
+       if (genpd->gov && genpd->gov->power_down_ok) {
+               if (!genpd->gov->power_down_ok(&genpd->domain))
+                       return -EAGAIN;
+       }
+
+       if (genpd->power_off) {
+               int ret;
+
+               if (atomic_read(&genpd->sd_count) > 0)
+                       return -EBUSY;
+
+               /*
+                * If sd_count > 0 at this point, one of the subdomains hasn't
+                * managed to call genpd_power_on() for the master yet after
+                * incrementing it.  In that case genpd_power_on() will wait
+                * for us to drop the lock, so we can call .power_off() and let
+                * the genpd_power_on() restore power for us (this shouldn't
+                * happen very often).
+                */
+               ret = _genpd_power_off(genpd, true);
+               if (ret)
+                       return ret;
+       }
+
+       genpd->status = GPD_STATE_POWER_OFF;
+
+       list_for_each_entry(link, &genpd->slave_links, slave_node) {
+               genpd_sd_counter_dec(link->master);
+               genpd_lock_nested(link->master, depth + 1);
+               genpd_power_off(link->master, false, depth + 1);
+               genpd_unlock(link->master);
+       }
+
+       return 0;
+}
+
+/**
+ * genpd_power_on - Restore power to a given PM domain and its masters.
  * @genpd: PM domain to power up.
  * @depth: nesting count for lockdep.
  *
  * Restore power to @genpd and all of its masters so that it is possible to
  * resume a device belonging to it.
  */
-static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
+static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
 {
        struct gpd_link *link;
        int ret = 0;
@@ -300,7 +387,7 @@ static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
                genpd_sd_counter_inc(master);
 
                genpd_lock_nested(master, depth + 1);
-               ret = genpd_poweron(master, depth + 1);
+               ret = genpd_power_on(master, depth + 1);
                genpd_unlock(master);
 
                if (ret) {
@@ -309,7 +396,7 @@ static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
                }
        }
 
-       ret = genpd_power_on(genpd, true);
+       ret = _genpd_power_on(genpd, true);
        if (ret)
                goto err;
 
@@ -321,7 +408,9 @@ static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
                                        &genpd->slave_links,
                                        slave_node) {
                genpd_sd_counter_dec(link->master);
-               genpd_queue_power_off_work(link->master);
+               genpd_lock_nested(link->master, depth + 1);
+               genpd_power_off(link->master, false, depth + 1);
+               genpd_unlock(link->master);
        }
 
        return ret;
@@ -367,87 +456,6 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
        return NOTIFY_DONE;
 }
 
-/**
- * genpd_poweroff - Remove power from a given PM domain.
- * @genpd: PM domain to power down.
- * @is_async: PM domain is powered down from a scheduled work
- *
- * If all of the @genpd's devices have been suspended and all of its subdomains
- * have been powered down, remove power from @genpd.
- */
-static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async)
-{
-       struct pm_domain_data *pdd;
-       struct gpd_link *link;
-       unsigned int not_suspended = 0;
-
-       /*
-        * Do not try to power off the domain in the following situations:
-        * (1) The domain is already in the "power off" state.
-        * (2) System suspend is in progress.
-        */
-       if (genpd->status == GPD_STATE_POWER_OFF
-           || genpd->prepared_count > 0)
-               return 0;
-
-       if (atomic_read(&genpd->sd_count) > 0)
-               return -EBUSY;
-
-       list_for_each_entry(pdd, &genpd->dev_list, list_node) {
-               enum pm_qos_flags_status stat;
-
-               stat = dev_pm_qos_flags(pdd->dev,
-                                       PM_QOS_FLAG_NO_POWER_OFF
-                                               | PM_QOS_FLAG_REMOTE_WAKEUP);
-               if (stat > PM_QOS_FLAGS_NONE)
-                       return -EBUSY;
-
-               /*
-                * Do not allow PM domain to be powered off, when an IRQ safe
-                * device is part of a non-IRQ safe domain.
-                */
-               if (!pm_runtime_suspended(pdd->dev) ||
-                       irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
-                       not_suspended++;
-       }
-
-       if (not_suspended > 1 || (not_suspended == 1 && is_async))
-               return -EBUSY;
-
-       if (genpd->gov && genpd->gov->power_down_ok) {
-               if (!genpd->gov->power_down_ok(&genpd->domain))
-                       return -EAGAIN;
-       }
-
-       if (genpd->power_off) {
-               int ret;
-
-               if (atomic_read(&genpd->sd_count) > 0)
-                       return -EBUSY;
-
-               /*
-                * If sd_count > 0 at this point, one of the subdomains hasn't
-                * managed to call genpd_poweron() for the master yet after
-                * incrementing it.  In that case genpd_poweron() will wait
-                * for us to drop the lock, so we can call .power_off() and let
-                * the genpd_poweron() restore power for us (this shouldn't
-                * happen very often).
-                */
-               ret = genpd_power_off(genpd, true);
-               if (ret)
-                       return ret;
-       }
-
-       genpd->status = GPD_STATE_POWER_OFF;
-
-       list_for_each_entry(link, &genpd->slave_links, slave_node) {
-               genpd_sd_counter_dec(link->master);
-               genpd_queue_power_off_work(link->master);
-       }
-
-       return 0;
-}
-
 /**
  * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
  * @work: Work structure used for scheduling the execution of this function.
@@ -459,7 +467,7 @@ static void genpd_power_off_work_fn(struct work_struct *work)
        genpd = container_of(work, struct generic_pm_domain, power_off_work);
 
        genpd_lock(genpd);
-       genpd_poweroff(genpd, true);
+       genpd_power_off(genpd, false, 0);
        genpd_unlock(genpd);
 }
 
@@ -578,7 +586,7 @@ static int genpd_runtime_suspend(struct device *dev)
                return 0;
 
        genpd_lock(genpd);
-       genpd_poweroff(genpd, false);
+       genpd_power_off(genpd, true, 0);
        genpd_unlock(genpd);
 
        return 0;
@@ -618,7 +626,7 @@ static int genpd_runtime_resume(struct device *dev)
        }
 
        genpd_lock(genpd);
-       ret = genpd_poweron(genpd, 0);
+       ret = genpd_power_on(genpd, 0);
        genpd_unlock(genpd);
 
        if (ret)
@@ -658,7 +666,7 @@ err_poweroff:
        if (!pm_runtime_is_irq_safe(dev) ||
                (pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
                genpd_lock(genpd);
-               genpd_poweroff(genpd, 0);
+               genpd_power_off(genpd, true, 0);
                genpd_unlock(genpd);
        }
 
@@ -674,9 +682,9 @@ static int __init pd_ignore_unused_setup(char *__unused)
 __setup("pd_ignore_unused", pd_ignore_unused_setup);
 
 /**
- * genpd_poweroff_unused - Power off all PM domains with no devices in use.
+ * genpd_power_off_unused - Power off all PM domains with no devices in use.
  */
-static int __init genpd_poweroff_unused(void)
+static int __init genpd_power_off_unused(void)
 {
        struct generic_pm_domain *genpd;
 
@@ -694,7 +702,7 @@ static int __init genpd_poweroff_unused(void)
 
        return 0;
 }
-late_initcall(genpd_poweroff_unused);
+late_initcall(genpd_power_off_unused);
 
 #if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)
 
@@ -727,18 +735,20 @@ static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
 }
 
 /**
- * genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
+ * genpd_sync_power_off - Synchronously power off a PM domain and its masters.
  * @genpd: PM domain to power off, if possible.
+ * @use_lock: use the lock.
+ * @depth: nesting count for lockdep.
  *
  * Check if the given PM domain can be powered off (during system suspend or
  * hibernation) and do that if so.  Also, in that case propagate to its masters.
  *
  * This function is only called in "noirq" and "syscore" stages of system power
- * transitions, so it need not acquire locks (all of the "noirq" callbacks are
- * executed sequentially, so it is guaranteed that it will never run twice in
- * parallel).
+ * transitions. The "noirq" callbacks may be executed asynchronously, thus in
+ * these cases the lock must be held.
  */
-static void genpd_sync_poweroff(struct generic_pm_domain *genpd)
+static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
+                                unsigned int depth)
 {
        struct gpd_link *link;
 
@@ -751,26 +761,35 @@ static void genpd_sync_poweroff(struct generic_pm_domain *genpd)
 
        /* Choose the deepest state when suspending */
        genpd->state_idx = genpd->state_count - 1;
-       genpd_power_off(genpd, false);
+       _genpd_power_off(genpd, false);
 
        genpd->status = GPD_STATE_POWER_OFF;
 
        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                genpd_sd_counter_dec(link->master);
-               genpd_sync_poweroff(link->master);
+
+               if (use_lock)
+                       genpd_lock_nested(link->master, depth + 1);
+
+               genpd_sync_power_off(link->master, use_lock, depth + 1);
+
+               if (use_lock)
+                       genpd_unlock(link->master);
        }
 }
 
 /**
- * genpd_sync_poweron - Synchronously power on a PM domain and its masters.
+ * genpd_sync_power_on - Synchronously power on a PM domain and its masters.
  * @genpd: PM domain to power on.
+ * @use_lock: use the lock.
+ * @depth: nesting count for lockdep.
  *
  * This function is only called in "noirq" and "syscore" stages of system power
- * transitions, so it need not acquire locks (all of the "noirq" callbacks are
- * executed sequentially, so it is guaranteed that it will never run twice in
- * parallel).
+ * transitions. The "noirq" callbacks may be executed asynchronously, thus in
+ * these cases the lock must be held.
  */
-static void genpd_sync_poweron(struct generic_pm_domain *genpd)
+static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
+                               unsigned int depth)
 {
        struct gpd_link *link;
 
@@ -778,11 +797,18 @@ static void genpd_sync_poweron(struct generic_pm_domain *genpd)
                return;
 
        list_for_each_entry(link, &genpd->slave_links, slave_node) {
-               genpd_sync_poweron(link->master);
                genpd_sd_counter_inc(link->master);
+
+               if (use_lock)
+                       genpd_lock_nested(link->master, depth + 1);
+
+               genpd_sync_power_on(link->master, use_lock, depth + 1);
+
+               if (use_lock)
+                       genpd_unlock(link->master);
        }
 
-       genpd_power_on(genpd, false);
+       _genpd_power_on(genpd, false);
 
        genpd->status = GPD_STATE_ACTIVE;
 }
@@ -888,13 +914,10 @@ static int pm_genpd_suspend_noirq(struct device *dev)
                        return ret;
        }
 
-       /*
-        * Since all of the "noirq" callbacks are executed sequentially, it is
-        * guaranteed that this function will never run twice in parallel for
-        * the same PM domain, so it is not necessary to use locking here.
-        */
+       genpd_lock(genpd);
        genpd->suspended_count++;
-       genpd_sync_poweroff(genpd);
+       genpd_sync_power_off(genpd, true, 0);
+       genpd_unlock(genpd);
 
        return 0;
 }
@@ -919,13 +942,10 @@ static int pm_genpd_resume_noirq(struct device *dev)
        if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
                return 0;
 
-       /*
-        * Since all of the "noirq" callbacks are executed sequentially, it is
-        * guaranteed that this function will never run twice in parallel for
-        * the same PM domain, so it is not necessary to use locking here.
-        */
-       genpd_sync_poweron(genpd);
+       genpd_lock(genpd);
+       genpd_sync_power_on(genpd, true, 0);
        genpd->suspended_count--;
+       genpd_unlock(genpd);
 
        if (genpd->dev_ops.stop && genpd->dev_ops.start)
                ret = pm_runtime_force_resume(dev);
@@ -1002,22 +1022,20 @@ static int pm_genpd_restore_noirq(struct device *dev)
                return -EINVAL;
 
        /*
-        * Since all of the "noirq" callbacks are executed sequentially, it is
-        * guaranteed that this function will never run twice in parallel for
-        * the same PM domain, so it is not necessary to use locking here.
-        *
         * At this point suspended_count == 0 means we are being run for the
         * first time for the given domain in the present cycle.
         */
+       genpd_lock(genpd);
        if (genpd->suspended_count++ == 0)
                /*
                 * The boot kernel might put the domain into arbitrary state,
-                * so make it appear as powered off to genpd_sync_poweron(),
+                * so make it appear as powered off to genpd_sync_power_on(),
                 * so that it tries to power it on in case it was really off.
                 */
                genpd->status = GPD_STATE_POWER_OFF;
 
-       genpd_sync_poweron(genpd);
+       genpd_sync_power_on(genpd, true, 0);
+       genpd_unlock(genpd);
 
        if (genpd->dev_ops.stop && genpd->dev_ops.start)
                ret = pm_runtime_force_resume(dev);
@@ -1072,9 +1090,9 @@ static void genpd_syscore_switch(struct device *dev, bool suspend)
 
        if (suspend) {
                genpd->suspended_count++;
-               genpd_sync_poweroff(genpd);
+               genpd_sync_power_off(genpd, false, 0);
        } else {
-               genpd_sync_poweron(genpd);
+               genpd_sync_power_on(genpd, false, 0);
                genpd->suspended_count--;
        }
 }
@@ -2043,7 +2061,7 @@ int genpd_dev_pm_attach(struct device *dev)
        dev->pm_domain->sync = genpd_dev_pm_sync;
 
        genpd_lock(pd);
-       ret = genpd_poweron(pd, 0);
+       ret = genpd_power_on(pd, 0);
        genpd_unlock(pd);
 out:
        return ret ? -EPROBE_DEFER : 0;
index 58fcc758334e5bd230a5929a70844d35f4615628..271bec73185ee58976df939cc6fcb561dceb7f82 100644 (file)
  *
  * This QoS design is best effort based. Dependents register their QoS needs.
  * Watchers register to keep track of the current QoS needs of the system.
- * Watchers can register different types of notification callbacks:
- *  . a per-device notification callback using the dev_pm_qos_*_notifier API.
- *    The notification chain data is stored in the per-device constraint
- *    data struct.
- *  . a system-wide notification callback using the dev_pm_qos_*_global_notifier
- *    API. The notification chain data is stored in a static variable.
+ * Watchers can register a per-device notification callback using the
+ * dev_pm_qos_*_notifier API. The notification chain data is stored in the
+ * per-device constraint data struct.
  *
  * Note about the per-device constraint data struct allocation:
  * . The per-device constraints data struct ptr is tored into the device
@@ -49,8 +46,6 @@
 static DEFINE_MUTEX(dev_pm_qos_mtx);
 static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);
 
-static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
-
 /**
  * __dev_pm_qos_flags - Check PM QoS flags for a given device.
  * @dev: Device to check the PM QoS flags for.
@@ -135,8 +130,7 @@ s32 dev_pm_qos_read_value(struct device *dev)
  * @value: Value to assign to the QoS request.
  *
  * Internal function to update the constraints list using the PM QoS core
- * code and if needed call the per-device and the global notification
- * callbacks
+ * code and if needed call the per-device callbacks.
  */
 static int apply_constraint(struct dev_pm_qos_request *req,
                            enum pm_qos_req_action action, s32 value)
@@ -148,12 +142,6 @@ static int apply_constraint(struct dev_pm_qos_request *req,
        case DEV_PM_QOS_RESUME_LATENCY:
                ret = pm_qos_update_target(&qos->resume_latency,
                                           &req->data.pnode, action, value);
-               if (ret) {
-                       value = pm_qos_read_value(&qos->resume_latency);
-                       blocking_notifier_call_chain(&dev_pm_notifiers,
-                                                    (unsigned long)value,
-                                                    req);
-               }
                break;
        case DEV_PM_QOS_LATENCY_TOLERANCE:
                ret = pm_qos_update_target(&qos->latency_tolerance,
@@ -281,7 +269,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
        dev->power.qos = ERR_PTR(-ENODEV);
        spin_unlock_irq(&dev->power.lock);
 
-       kfree(c->notifiers);
+       kfree(qos->resume_latency.notifiers);
        kfree(qos);
 
  out:
@@ -535,36 +523,6 @@ int dev_pm_qos_remove_notifier(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
 
-/**
- * dev_pm_qos_add_global_notifier - sets notification entry for changes to
- * target value of the PM QoS constraints for any device
- *
- * @notifier: notifier block managed by caller.
- *
- * Will register the notifier into a notification chain that gets called
- * upon changes to the target value for any device.
- */
-int dev_pm_qos_add_global_notifier(struct notifier_block *notifier)
-{
-       return blocking_notifier_chain_register(&dev_pm_notifiers, notifier);
-}
-EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier);
-
-/**
- * dev_pm_qos_remove_global_notifier - deletes notification for changes to
- * target value of PM QoS constraints for any device
- *
- * @notifier: notifier block to be removed.
- *
- * Will remove the notifier from the notification chain that gets called
- * upon changes to the target value for any device.
- */
-int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
-{
-       return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
-}
-EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
-
 /**
  * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
  * @dev: Device whose ancestor to add the request for.
index 872eac4cb1dfdcd069136856e681f741332708e2..a14fac6a01d316a0249fdd45de3e676e0dffed7c 100644 (file)
@@ -966,13 +966,13 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
        unsigned long flags;
        int retval;
 
-       might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
        if (rpmflags & RPM_GET_PUT) {
                if (!atomic_dec_and_test(&dev->power.usage_count))
                        return 0;
        }
 
+       might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_idle(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -998,13 +998,13 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
        unsigned long flags;
        int retval;
 
-       might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
        if (rpmflags & RPM_GET_PUT) {
                if (!atomic_dec_and_test(&dev->power.usage_count))
                        return 0;
        }
 
+       might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_suspend(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -1029,7 +1029,8 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
        unsigned long flags;
        int retval;
 
-       might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+       might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
+                       dev->power.runtime_status != RPM_ACTIVE);
 
        if (rpmflags & RPM_GET_PUT)
                atomic_inc(&dev->power.usage_count);
index 404d94c6c8bc6a4531b0666a77c5b86daab207a6..ae0429827f31052fff58362131e171f33ff27a17 100644 (file)
@@ -141,6 +141,13 @@ static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
        struct wake_irq *wirq = _wirq;
        int res;
 
+       /* Maybe abort suspend? */
+       if (irqd_is_wakeup_set(irq_get_irq_data(irq))) {
+               pm_wakeup_event(wirq->dev, 0);
+
+               return IRQ_HANDLED;
+       }
+
        /* We don't want RPM_ASYNC or RPM_NOWAIT here */
        res = pm_runtime_resume(wirq->dev);
        if (res < 0)
@@ -183,6 +190,9 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
        wirq->irq = irq;
        irq_set_status_flags(irq, IRQ_NOAUTOEN);
 
+       /* Prevent deferred spurious wakeirqs with disable_irq_nosync() */
+       irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
+
        /*
         * Consumer device may need to power up and restore state
         * so we use a threaded irq.
@@ -312,8 +322,12 @@ void dev_pm_arm_wake_irq(struct wake_irq *wirq)
        if (!wirq)
                return;
 
-       if (device_may_wakeup(wirq->dev))
+       if (device_may_wakeup(wirq->dev)) {
+               if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED)
+                       enable_irq(wirq->irq);
+
                enable_irq_wake(wirq->irq);
+       }
 }
 
 /**
@@ -328,6 +342,10 @@ void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
        if (!wirq)
                return;
 
-       if (device_may_wakeup(wirq->dev))
+       if (device_may_wakeup(wirq->dev)) {
                disable_irq_wake(wirq->irq);
+
+               if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED)
+                       disable_irq_nosync(wirq->irq);
+       }
 }
index b11af3f2c1dbf2e1d6faf31cb7bc38e663b2285c..b1e9aae9a5d0be6e035c39472e8e3cdbbdc3abfb 100644 (file)
@@ -81,7 +81,7 @@ static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
 
        node = rbtree_ctx->root.rb_node;
        while (node) {
-               rbnode = container_of(node, struct regcache_rbtree_node, node);
+               rbnode = rb_entry(node, struct regcache_rbtree_node, node);
                regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
                                                 &top_reg);
                if (reg >= base_reg && reg <= top_reg) {
@@ -108,8 +108,7 @@ static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
        parent = NULL;
        new = &root->rb_node;
        while (*new) {
-               rbnode_tmp = container_of(*new, struct regcache_rbtree_node,
-                                         node);
+               rbnode_tmp = rb_entry(*new, struct regcache_rbtree_node, node);
                /* base and top registers of the current rbnode */
                regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
                                                 &top_reg_tmp);
@@ -152,7 +151,7 @@ static int rbtree_show(struct seq_file *s, void *ignored)
 
        for (node = rb_first(&rbtree_ctx->root); node != NULL;
             node = rb_next(node)) {
-               n = container_of(node, struct regcache_rbtree_node, node);
+               n = rb_entry(node, struct regcache_rbtree_node, node);
                mem_size += sizeof(*n);
                mem_size += (n->blklen * map->cache_word_size);
                mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long);
index 4e582561e1e7a0cdffeaf8b3901d3a56d66a37ad..b0a0dcf32fb7d05a0abdf725c158c47453652e34 100644 (file)
@@ -224,7 +224,7 @@ void regcache_exit(struct regmap *map)
 }
 
 /**
- * regcache_read: Fetch the value of a given register from the cache.
+ * regcache_read - Fetch the value of a given register from the cache.
  *
  * @map: map to configure.
  * @reg: The register index.
@@ -255,7 +255,7 @@ int regcache_read(struct regmap *map,
 }
 
 /**
- * regcache_write: Set the value of a given register in the cache.
+ * regcache_write - Set the value of a given register in the cache.
  *
  * @map: map to configure.
  * @reg: The register index.
@@ -328,7 +328,7 @@ static int regcache_default_sync(struct regmap *map, unsigned int min,
 }
 
 /**
- * regcache_sync: Sync the register cache with the hardware.
+ * regcache_sync - Sync the register cache with the hardware.
  *
  * @map: map to configure.
  *
@@ -396,7 +396,7 @@ out:
 EXPORT_SYMBOL_GPL(regcache_sync);
 
 /**
- * regcache_sync_region: Sync part  of the register cache with the hardware.
+ * regcache_sync_region - Sync part  of the register cache with the hardware.
  *
  * @map: map to sync.
  * @min: first register to sync
@@ -452,7 +452,7 @@ out:
 EXPORT_SYMBOL_GPL(regcache_sync_region);
 
 /**
- * regcache_drop_region: Discard part of the register cache
+ * regcache_drop_region - Discard part of the register cache
  *
  * @map: map to operate on
  * @min: first register to discard
@@ -483,10 +483,10 @@ int regcache_drop_region(struct regmap *map, unsigned int min,
 EXPORT_SYMBOL_GPL(regcache_drop_region);
 
 /**
- * regcache_cache_only: Put a register map into cache only mode
+ * regcache_cache_only - Put a register map into cache only mode
  *
  * @map: map to configure
- * @cache_only: flag if changes should be written to the hardware
+ * @enable: flag if changes should be written to the hardware
  *
  * When a register map is marked as cache only writes to the register
  * map API will only update the register cache, they will not cause
@@ -505,7 +505,7 @@ void regcache_cache_only(struct regmap *map, bool enable)
 EXPORT_SYMBOL_GPL(regcache_cache_only);
 
 /**
- * regcache_mark_dirty: Indicate that HW registers were reset to default values
+ * regcache_mark_dirty - Indicate that HW registers were reset to default values
  *
  * @map: map to mark
  *
@@ -527,10 +527,10 @@ void regcache_mark_dirty(struct regmap *map)
 EXPORT_SYMBOL_GPL(regcache_mark_dirty);
 
 /**
- * regcache_cache_bypass: Put a register map into cache bypass mode
+ * regcache_cache_bypass - Put a register map into cache bypass mode
  *
  * @map: map to configure
- * @cache_bypass: flag if changes should not be written to the cache
+ * @enable: flag if changes should not be written to the cache
  *
  * When a register map is marked with the cache bypass option, writes
  * to the register map API will only update the hardware and not the
index ec262476d04387053ffc091b6c24993f5ce15415..cd54189f2b1d4d18dd62172385848a75887bbc87 100644 (file)
@@ -398,13 +398,14 @@ static const struct irq_domain_ops regmap_domain_ops = {
 };
 
 /**
- * regmap_add_irq_chip(): Use standard regmap IRQ controller handling
+ * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
  *
- * map:       The regmap for the device.
- * irq:       The IRQ the device uses to signal interrupts
- * irq_flags: The IRQF_ flags to use for the primary interrupt.
- * chip:      Configuration for the interrupt controller.
- * data:      Runtime data structure for the controller, allocated on success
+ * @map: The regmap for the device.
+ * @irq: The IRQ the device uses to signal interrupts.
+ * @irq_flags: The IRQF_ flags to use for the primary interrupt.
+ * @irq_base: Allocate at specific IRQ number if irq_base > 0.
+ * @chip: Configuration for the interrupt controller.
+ * @data: Runtime data structure for the controller, allocated on success.
  *
  * Returns 0 on success or an errno on failure.
  *
@@ -659,12 +660,12 @@ err_alloc:
 EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
 
 /**
- * regmap_del_irq_chip(): Stop interrupt handling for a regmap IRQ chip
+ * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
  *
  * @irq: Primary IRQ for the device
- * @d:   regmap_irq_chip_data allocated by regmap_add_irq_chip()
+ * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip()
  *
- * This function also dispose all mapped irq on chip.
+ * This function also disposes of all mapped IRQs on the chip.
  */
 void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
 {
@@ -723,18 +724,19 @@ static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
 }
 
 /**
- * devm_regmap_add_irq_chip(): Resource manager regmap_add_irq_chip()
+ * devm_regmap_add_irq_chip() - Resource manager regmap_add_irq_chip()
  *
- * @dev:       The device pointer on which irq_chip belongs to.
- * @map:       The regmap for the device.
- * @irq:       The IRQ the device uses to signal interrupts
+ * @dev: The device pointer on which irq_chip belongs to.
+ * @map: The regmap for the device.
+ * @irq: The IRQ the device uses to signal interrupts
  * @irq_flags: The IRQF_ flags to use for the primary interrupt.
- * @chip:      Configuration for the interrupt controller.
- * @data:      Runtime data structure for the controller, allocated on success
+ * @irq_base: Allocate at specific IRQ number if irq_base > 0.
+ * @chip: Configuration for the interrupt controller.
+ * @data: Runtime data structure for the controller, allocated on success
  *
  * Returns 0 on success or an errno on failure.
  *
- * The regmap_irq_chip data automatically be released when the device is
+ * The &regmap_irq_chip_data will be automatically released when the device is
  * unbound.
  */
 int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
@@ -765,11 +767,13 @@ int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
 EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
 
 /**
- * devm_regmap_del_irq_chip(): Resource managed regmap_del_irq_chip()
+ * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
  *
  * @dev: Device for which which resource was allocated.
- * @irq: Primary IRQ for the device
- * @d:   regmap_irq_chip_data allocated by regmap_add_irq_chip()
+ * @irq: Primary IRQ for the device.
+ * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
+ *
+ * A resource managed version of regmap_del_irq_chip().
  */
 void devm_regmap_del_irq_chip(struct device *dev, int irq,
                              struct regmap_irq_chip_data *data)
@@ -786,11 +790,11 @@ void devm_regmap_del_irq_chip(struct device *dev, int irq,
 EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);
 
 /**
- * regmap_irq_chip_get_base(): Retrieve interrupt base for a regmap IRQ chip
+ * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
  *
- * Useful for drivers to request their own IRQs.
+ * @data: regmap irq controller to operate on.
  *
- * @data: regmap_irq controller to operate on.
+ * Useful for drivers to request their own IRQs.
  */
 int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
 {
@@ -800,12 +804,12 @@ int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
 EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
 
 /**
- * regmap_irq_get_virq(): Map an interrupt on a chip to a virtual IRQ
+ * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
  *
- * Useful for drivers to request their own IRQs.
+ * @data: regmap irq controller to operate on.
+ * @irq: index of the interrupt requested in the chip IRQs.
  *
- * @data: regmap_irq controller to operate on.
- * @irq: index of the interrupt requested in the chip IRQs
+ * Useful for drivers to request their own IRQs.
  */
 int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
 {
@@ -818,14 +822,14 @@ int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
 EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
 
 /**
- * regmap_irq_get_domain(): Retrieve the irq_domain for the chip
+ * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
+ *
+ * @data: regmap_irq controller to operate on.
  *
  * Useful for drivers to request their own IRQs and for integration
  * with subsystems.  For ease of integration NULL is accepted as a
  * domain, allowing devices to just call this even if no domain is
  * allocated.
- *
- * @data: regmap_irq controller to operate on.
  */
 struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
 {
index ae63bb0875ea8388227b0b2285fc61993e34a367..b9a779a4a739cda497351be2b2dc51d69f35c022 100644 (file)
@@ -459,7 +459,7 @@ static bool _regmap_range_add(struct regmap *map,
 
        while (*new) {
                struct regmap_range_node *this =
-                       container_of(*new, struct regmap_range_node, node);
+                       rb_entry(*new, struct regmap_range_node, node);
 
                parent = *new;
                if (data->range_max < this->range_min)
@@ -483,7 +483,7 @@ static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
 
        while (node) {
                struct regmap_range_node *this =
-                       container_of(node, struct regmap_range_node, node);
+                       rb_entry(node, struct regmap_range_node, node);
 
                if (reg < this->range_min)
                        node = node->rb_left;
@@ -1091,8 +1091,7 @@ static void regmap_field_init(struct regmap_field *rm_field,
 }
 
 /**
- * devm_regmap_field_alloc(): Allocate and initialise a register field
- * in a register map.
+ * devm_regmap_field_alloc() - Allocate and initialise a register field.
  *
  * @dev: Device that will be interacted with
  * @regmap: regmap bank in which this register field is located.
@@ -1118,13 +1117,15 @@ struct regmap_field *devm_regmap_field_alloc(struct device *dev,
 EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
 
 /**
- * devm_regmap_field_free(): Free register field allocated using
- * devm_regmap_field_alloc. Usally drivers need not call this function,
- * as the memory allocated via devm will be freed as per device-driver
- * life-cyle.
+ * devm_regmap_field_free() - Free a register field allocated using
+ *                            devm_regmap_field_alloc.
  *
  * @dev: Device that will be interacted with
  * @field: regmap field which should be freed.
+ *
+ * Free register field allocated using devm_regmap_field_alloc(). Usually
+ * drivers need not call this function, as the memory allocated via devm
+ * will be freed as per device-driver life-cyle.
  */
 void devm_regmap_field_free(struct device *dev,
        struct regmap_field *field)
@@ -1134,8 +1135,7 @@ void devm_regmap_field_free(struct device *dev,
 EXPORT_SYMBOL_GPL(devm_regmap_field_free);
 
 /**
- * regmap_field_alloc(): Allocate and initialise a register field
- * in a register map.
+ * regmap_field_alloc() - Allocate and initialise a register field.
  *
  * @regmap: regmap bank in which this register field is located.
  * @reg_field: Register field with in the bank.
@@ -1159,7 +1159,8 @@ struct regmap_field *regmap_field_alloc(struct regmap *regmap,
 EXPORT_SYMBOL_GPL(regmap_field_alloc);
 
 /**
- * regmap_field_free(): Free register field allocated using regmap_field_alloc
+ * regmap_field_free() - Free register field allocated using
+ *                       regmap_field_alloc.
  *
  * @field: regmap field which should be freed.
  */
@@ -1170,7 +1171,7 @@ void regmap_field_free(struct regmap_field *field)
 EXPORT_SYMBOL_GPL(regmap_field_free);
 
 /**
- * regmap_reinit_cache(): Reinitialise the current register cache
+ * regmap_reinit_cache() - Reinitialise the current register cache
  *
  * @map: Register map to operate on.
  * @config: New configuration.  Only the cache data will be used.
@@ -1205,7 +1206,9 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
 EXPORT_SYMBOL_GPL(regmap_reinit_cache);
 
 /**
- * regmap_exit(): Free a previously allocated register map
+ * regmap_exit() - Free a previously allocated register map
+ *
+ * @map: Register map to operate on.
  */
 void regmap_exit(struct regmap *map)
 {
@@ -1245,7 +1248,7 @@ static int dev_get_regmap_match(struct device *dev, void *res, void *data)
 }
 
 /**
- * dev_get_regmap(): Obtain the regmap (if any) for a device
+ * dev_get_regmap() - Obtain the regmap (if any) for a device
  *
  * @dev: Device to retrieve the map for
  * @name: Optional name for the register map, usually NULL.
@@ -1268,7 +1271,7 @@ struct regmap *dev_get_regmap(struct device *dev, const char *name)
 EXPORT_SYMBOL_GPL(dev_get_regmap);
 
 /**
- * regmap_get_device(): Obtain the device from a regmap
+ * regmap_get_device() - Obtain the device from a regmap
  *
  * @map: Register map to operate on.
  *
@@ -1654,7 +1657,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
 }
 
 /**
- * regmap_write(): Write a value to a single register
+ * regmap_write() - Write a value to a single register
  *
  * @map: Register map to write to
  * @reg: Register to write to
@@ -1681,7 +1684,7 @@ int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
 EXPORT_SYMBOL_GPL(regmap_write);
 
 /**
- * regmap_write_async(): Write a value to a single register asynchronously
+ * regmap_write_async() - Write a value to a single register asynchronously
  *
  * @map: Register map to write to
  * @reg: Register to write to
@@ -1712,7 +1715,7 @@ int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
 EXPORT_SYMBOL_GPL(regmap_write_async);
 
 /**
- * regmap_raw_write(): Write raw values to one or more registers
+ * regmap_raw_write() - Write raw values to one or more registers
  *
  * @map: Register map to write to
  * @reg: Initial register to write to
@@ -1750,9 +1753,8 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
 EXPORT_SYMBOL_GPL(regmap_raw_write);
 
 /**
- * regmap_field_update_bits_base():
- *     Perform a read/modify/write cycle on the register field
- *     with change, async, force option
+ * regmap_field_update_bits_base() - Perform a read/modify/write cycle a
+ *                                   register field.
  *
  * @field: Register field to write to
  * @mask: Bitmask to change
@@ -1761,6 +1763,9 @@ EXPORT_SYMBOL_GPL(regmap_raw_write);
  * @async: Boolean indicating asynchronously
  * @force: Boolean indicating use force update
  *
+ * Perform a read/modify/write cycle on the register field with change,
+ * async, force option.
+ *
  * A value of zero will be returned on success, a negative errno will
  * be returned in error cases.
  */
@@ -1777,9 +1782,8 @@ int regmap_field_update_bits_base(struct regmap_field *field,
 EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
 
 /**
- * regmap_fields_update_bits_base():
- *     Perform a read/modify/write cycle on the register field
- *     with change, async, force option
+ * regmap_fields_update_bits_base() - Perform a read/modify/write cycle a
+ *                                    register field with port ID
  *
  * @field: Register field to write to
  * @id: port ID
@@ -1808,8 +1812,8 @@ int regmap_fields_update_bits_base(struct regmap_field *field,  unsigned int id,
 }
 EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
 
-/*
- * regmap_bulk_write(): Write multiple registers to the device
+/**
+ * regmap_bulk_write() - Write multiple registers to the device
  *
  * @map: Register map to write to
  * @reg: First register to be write from
@@ -2174,18 +2178,18 @@ static int _regmap_multi_reg_write(struct regmap *map,
        return _regmap_raw_multi_reg_write(map, regs, num_regs);
 }
 
-/*
- * regmap_multi_reg_write(): Write multiple registers to the device
- *
- * where the set of register,value pairs are supplied in any order,
- * possibly not all in a single range.
+/**
+ * regmap_multi_reg_write() - Write multiple registers to the device
  *
  * @map: Register map to write to
  * @regs: Array of structures containing register,value to be written
  * @num_regs: Number of registers to write
  *
+ * Write multiple registers to the device where the set of register, value
+ * pairs are supplied in any order, possibly not all in a single range.
+ *
  * The 'normal' block write mode will send ultimately send data on the
- * target bus as R,V1,V2,V3,..,Vn where successively higer registers are
+ * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
  * addressed. However, this alternative block multi write mode will send
  * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
  * must of course support the mode.
@@ -2208,16 +2212,17 @@ int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
 }
 EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
 
-/*
- * regmap_multi_reg_write_bypassed(): Write multiple registers to the
- *                                    device but not the cache
- *
- * where the set of register are supplied in any order
+/**
+ * regmap_multi_reg_write_bypassed() - Write multiple registers to the
+ *                                     device but not the cache
  *
  * @map: Register map to write to
  * @regs: Array of structures containing register,value to be written
  * @num_regs: Number of registers to write
  *
+ * Write multiple registers to the device but not the cache where the set
+ * of register are supplied in any order.
+ *
  * This function is intended to be used for writing a large block of data
  * atomically to the device in single transfer for those I2C client devices
  * that implement this alternative block write mode.
@@ -2248,8 +2253,8 @@ int regmap_multi_reg_write_bypassed(struct regmap *map,
 EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
 
 /**
- * regmap_raw_write_async(): Write raw values to one or more registers
- *                           asynchronously
+ * regmap_raw_write_async() - Write raw values to one or more registers
+ *                            asynchronously
  *
  * @map: Register map to write to
  * @reg: Initial register to write to
@@ -2385,7 +2390,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
 }
 
 /**
- * regmap_read(): Read a value from a single register
+ * regmap_read() - Read a value from a single register
  *
  * @map: Register map to read from
  * @reg: Register to be read from
@@ -2412,7 +2417,7 @@ int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
 EXPORT_SYMBOL_GPL(regmap_read);
 
 /**
- * regmap_raw_read(): Read raw data from the device
+ * regmap_raw_read() - Read raw data from the device
  *
  * @map: Register map to read from
  * @reg: First register to be read from
@@ -2477,7 +2482,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 EXPORT_SYMBOL_GPL(regmap_raw_read);
 
 /**
- * regmap_field_read(): Read a value to a single register field
+ * regmap_field_read() - Read a value to a single register field
  *
  * @field: Register field to read from
  * @val: Pointer to store read value
@@ -2502,7 +2507,7 @@ int regmap_field_read(struct regmap_field *field, unsigned int *val)
 EXPORT_SYMBOL_GPL(regmap_field_read);
 
 /**
- * regmap_fields_read(): Read a value to a single register field with port ID
+ * regmap_fields_read() - Read a value to a single register field with port ID
  *
  * @field: Register field to read from
  * @id: port ID
@@ -2535,7 +2540,7 @@ int regmap_fields_read(struct regmap_field *field, unsigned int id,
 EXPORT_SYMBOL_GPL(regmap_fields_read);
 
 /**
- * regmap_bulk_read(): Read multiple registers from the device
+ * regmap_bulk_read() - Read multiple registers from the device
  *
  * @map: Register map to read from
  * @reg: First register to be read from
@@ -2692,9 +2697,7 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
 }
 
 /**
- * regmap_update_bits_base:
- *     Perform a read/modify/write cycle on the
- *     register map with change, async, force option
+ * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
  *
  * @map: Register map to update
  * @reg: Register to update
@@ -2704,10 +2707,14 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
  * @async: Boolean indicating asynchronously
  * @force: Boolean indicating use force update
  *
- * if async was true,
- * With most buses the read must be done synchronously so this is most
- * useful for devices with a cache which do not need to interact with
- * the hardware to determine the current register value.
+ * Perform a read/modify/write cycle on a register map with change, async, force
+ * options.
+ *
+ * If async is true:
+ *
+ * With most buses the read must be done synchronously so this is most useful
+ * for devices with a cache which do not need to interact with the hardware to
+ * determine the current register value.
  *
  * Returns zero for success, a negative number on error.
  */
@@ -2765,7 +2772,7 @@ static int regmap_async_is_done(struct regmap *map)
 }
 
 /**
- * regmap_async_complete: Ensure all asynchronous I/O has completed.
+ * regmap_async_complete - Ensure all asynchronous I/O has completed.
  *
  * @map: Map to operate on.
  *
@@ -2797,8 +2804,8 @@ int regmap_async_complete(struct regmap *map)
 EXPORT_SYMBOL_GPL(regmap_async_complete);
 
 /**
- * regmap_register_patch: Register and apply register updates to be applied
- *                        on device initialistion
+ * regmap_register_patch - Register and apply register updates to be applied
+ *                         on device initialistion
  *
  * @map: Register map to apply updates to.
  * @regs: Values to update.
@@ -2855,8 +2862,10 @@ int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
 }
 EXPORT_SYMBOL_GPL(regmap_register_patch);
 
-/*
- * regmap_get_val_bytes(): Report the size of a register value
+/**
+ * regmap_get_val_bytes() - Report the size of a register value
+ *
+ * @map: Register map to operate on.
  *
  * Report the size of a register value, mainly intended to for use by
  * generic infrastructure built on top of regmap.
@@ -2871,7 +2880,9 @@ int regmap_get_val_bytes(struct regmap *map)
 EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
 
 /**
- * regmap_get_max_register(): Report the max register value
+ * regmap_get_max_register() - Report the max register value
+ *
+ * @map: Register map to operate on.
  *
  * Report the max register value, mainly intended to for use by
  * generic infrastructure built on top of regmap.
@@ -2883,7 +2894,9 @@ int regmap_get_max_register(struct regmap *map)
 EXPORT_SYMBOL_GPL(regmap_get_max_register);
 
 /**
- * regmap_get_reg_stride(): Report the register address stride
+ * regmap_get_reg_stride() - Report the register address stride
+ *
+ * @map: Register map to operate on.
  *
  * Report the register address stride, mainly intended to for use by
  * generic infrastructure built on top of regmap.
index f642c4264c277bc05d98dc99eb15ac8091886ba5..168fa175d65a08319f5e92ad10f8f5ff4baea54a 100644 (file)
@@ -45,6 +45,9 @@ int bcma_sprom_get(struct bcma_bus *bus);
 void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
 void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
 void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
 
 /* driver_chipcommon_b.c */
 int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb);
index b4f6520e74f05b8b7a0c1ec710042c0f38a02d7d..62f5bfa5065d919ee3acd9e596373923070e574a 100644 (file)
@@ -15,8 +15,6 @@
 #include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
 
-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
-
 static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
                                         u32 mask, u32 value)
 {
@@ -186,9 +184,6 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
        if (cc->capabilities & BCMA_CC_CAP_PMU)
                bcma_pmu_early_init(cc);
 
-       if (IS_BUILTIN(CONFIG_BCM47XX) && bus->hosttype == BCMA_HOSTTYPE_SOC)
-               bcma_chipco_serial_init(cc);
-
        if (bus->hosttype == BCMA_HOSTTYPE_SOC)
                bcma_core_chipcommon_flash_detect(cc);
 
@@ -378,9 +373,9 @@ u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value)
        return res;
 }
 
-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
 {
-#if IS_BUILTIN(CONFIG_BCM47XX)
        unsigned int irq;
        u32 baud_base;
        u32 i;
@@ -422,5 +417,5 @@ static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
                ports[i].baud_base = baud_base;
                ports[i].reg_shift = 0;
        }
-#endif /* CONFIG_BCM47XX */
 }
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
index 96f17132820080843e9216523bc1328b2f8f7939..89af807cf29ce49e38f60e9e1c3e177ceb0e261a 100644 (file)
@@ -278,9 +278,12 @@ static void bcma_core_mips_nvram_init(struct bcma_drv_mips *mcore)
 
 void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
 {
+       struct bcma_bus *bus = mcore->core->bus;
+
        if (mcore->early_setup_done)
                return;
 
+       bcma_chipco_serial_init(&bus->drv_cc);
        bcma_core_mips_nvram_init(mcore);
 
        mcore->early_setup_done = true;
index ab62b81c2ca7274342e58b9e5b4c0fc7ffc724fa..dece26f119d4a637f742dca833b7ec9eecd25ebe 100644 (file)
@@ -1070,7 +1070,7 @@ static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned
                .done = 0,
                .flags = flags,
                .error = 0,
-               .kref = { ATOMIC_INIT(2) },
+               .kref = KREF_INIT(2),
        };
 
        if (!get_ldev_if_state(device, D_ATTACHING)) {  /* put is in drbd_bm_aio_ctx_destroy() */
index 83482721bc012739cf25ee627fd2b85b2fd094ab..c3ff60c30dde06c5ac3f8efa4636806cb24c682c 100644 (file)
@@ -2948,7 +2948,6 @@ void drbd_delete_device(struct drbd_device *device)
        struct drbd_resource *resource = device->resource;
        struct drbd_connection *connection;
        struct drbd_peer_device *peer_device;
-       int refs = 3;
 
        /* move to free_peer_device() */
        for_each_peer_device(peer_device, device)
@@ -2956,13 +2955,15 @@ void drbd_delete_device(struct drbd_device *device)
        drbd_debugfs_device_cleanup(device);
        for_each_connection(connection, resource) {
                idr_remove(&connection->peer_devices, device->vnr);
-               refs++;
+               kref_put(&device->kref, drbd_destroy_device);
        }
        idr_remove(&resource->devices, device->vnr);
+       kref_put(&device->kref, drbd_destroy_device);
        idr_remove(&drbd_devices, device_to_minor(device));
+       kref_put(&device->kref, drbd_destroy_device);
        del_gendisk(device->vdisk);
        synchronize_rcu();
-       kref_sub(&device->kref, refs, drbd_destroy_device);
+       kref_put(&device->kref, drbd_destroy_device);
 }
 
 static int __init drbd_init(void)
index de279fe4e4fdb43a0d9458227b862ecdacb0aaa8..b489ac2e9c4446d74f1bfeb5b7230b152c17ce55 100644 (file)
@@ -421,7 +421,6 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
        struct drbd_peer_device *peer_device = first_peer_device(device);
        unsigned s = req->rq_state;
        int c_put = 0;
-       int k_put = 0;
 
        if (drbd_suspended(device) && !((s | clear) & RQ_COMPLETION_SUSP))
                set |= RQ_COMPLETION_SUSP;
@@ -437,6 +436,8 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
 
        /* intent: get references */
 
+       kref_get(&req->kref);
+
        if (!(s & RQ_LOCAL_PENDING) && (set & RQ_LOCAL_PENDING))
                atomic_inc(&req->completion_ref);
 
@@ -473,15 +474,12 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
 
        if (!(s & RQ_LOCAL_ABORTED) && (set & RQ_LOCAL_ABORTED)) {
                D_ASSERT(device, req->rq_state & RQ_LOCAL_PENDING);
-               /* local completion may still come in later,
-                * we need to keep the req object around. */
-               kref_get(&req->kref);
                ++c_put;
        }
 
        if ((s & RQ_LOCAL_PENDING) && (clear & RQ_LOCAL_PENDING)) {
                if (req->rq_state & RQ_LOCAL_ABORTED)
-                       ++k_put;
+                       kref_put(&req->kref, drbd_req_destroy);
                else
                        ++c_put;
                list_del_init(&req->req_pending_local);
@@ -503,7 +501,7 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
                if (s & RQ_NET_SENT)
                        atomic_sub(req->i.size >> 9, &device->ap_in_flight);
                if (s & RQ_EXP_BARR_ACK)
-                       ++k_put;
+                       kref_put(&req->kref, drbd_req_destroy);
                req->net_done_jif = jiffies;
 
                /* in ahead/behind mode, or just in case,
@@ -516,25 +514,16 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
 
        /* potentially complete and destroy */
 
-       if (k_put || c_put) {
-               /* Completion does it's own kref_put.  If we are going to
-                * kref_sub below, we need req to be still around then. */
-               int at_least = k_put + !!c_put;
-               int refcount = atomic_read(&req->kref.refcount);
-               if (refcount < at_least)
-                       drbd_err(device,
-                               "mod_rq_state: Logic BUG: %x -> %x: refcount = %d, should be >= %d\n",
-                               s, req->rq_state, refcount, at_least);
-       }
-
        /* If we made progress, retry conflicting peer requests, if any. */
        if (req->i.waiting)
                wake_up(&device->misc_wait);
 
-       if (c_put)
-               k_put += drbd_req_put_completion_ref(req, m, c_put);
-       if (k_put)
-               kref_sub(&req->kref, k_put, drbd_req_destroy);
+       if (c_put) {
+               if (drbd_req_put_completion_ref(req, m, c_put))
+                       kref_put(&req->kref, drbd_req_destroy);
+       } else {
+               kref_put(&req->kref, drbd_req_destroy);
+       }
 }
 
 static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
index 36d2b9f4e83654637925d57fce5a4fc8cd0f009e..436baa66f701c8ee88519047016c7525c2d390e6 100644 (file)
@@ -1535,7 +1535,7 @@ static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
 static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
 {
        dout("%s: obj %p (was %d)\n", __func__, obj_request,
-               atomic_read(&obj_request->kref.refcount));
+               kref_read(&obj_request->kref));
        kref_get(&obj_request->kref);
 }
 
@@ -1544,14 +1544,14 @@ static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
 {
        rbd_assert(obj_request != NULL);
        dout("%s: obj %p (was %d)\n", __func__, obj_request,
-               atomic_read(&obj_request->kref.refcount));
+               kref_read(&obj_request->kref));
        kref_put(&obj_request->kref, rbd_obj_request_destroy);
 }
 
 static void rbd_img_request_get(struct rbd_img_request *img_request)
 {
        dout("%s: img %p (was %d)\n", __func__, img_request,
-            atomic_read(&img_request->kref.refcount));
+            kref_read(&img_request->kref));
        kref_get(&img_request->kref);
 }
 
@@ -1562,7 +1562,7 @@ static void rbd_img_request_put(struct rbd_img_request *img_request)
 {
        rbd_assert(img_request != NULL);
        dout("%s: img %p (was %d)\n", __func__, img_request,
-               atomic_read(&img_request->kref.refcount));
+               kref_read(&img_request->kref));
        if (img_request_child_test(img_request))
                kref_put(&img_request->kref, rbd_parent_request_destroy);
        else
index 10332c24f9610d7e80b154bf36eebb0354ac4576..264c5eac12b028eab2d30d2e35bd05e1fa968745 100644 (file)
@@ -770,7 +770,7 @@ static void virtblk_remove(struct virtio_device *vdev)
        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);
 
-       refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount);
+       refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref);
        put_disk(vblk->disk);
        vdev->config->del_vqs(vdev);
        kfree(vblk->vqs);
index b2bdfa81f9297cd588ffcb1168f7db1cd5a48228..265f1a7072e9a1fed8212520d509adaecdbef04b 100644 (file)
@@ -197,13 +197,13 @@ struct blkfront_info
        /* Number of pages per ring buffer. */
        unsigned int nr_ring_pages;
        struct request_queue *rq;
-       unsigned int feature_flush;
-       unsigned int feature_fua;
+       unsigned int feature_flush:1;
+       unsigned int feature_fua:1;
        unsigned int feature_discard:1;
        unsigned int feature_secdiscard:1;
+       unsigned int feature_persistent:1;
        unsigned int discard_granularity;
        unsigned int discard_alignment;
-       unsigned int feature_persistent:1;
        /* Number of 4KB segments handled */
        unsigned int max_indirect_segments;
        int is_ready;
@@ -2223,7 +2223,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
        }
        else
                grants = info->max_indirect_segments;
-       psegs = grants / GRANTS_PER_PSEG;
+       psegs = DIV_ROUND_UP(grants, GRANTS_PER_PSEG);
 
        err = fill_grant_buffer(rinfo,
                                (grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info));
@@ -2323,13 +2323,16 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
                blkfront_setup_discard(info);
 
        info->feature_persistent =
-               xenbus_read_unsigned(info->xbdev->otherend,
-                                    "feature-persistent", 0);
+               !!xenbus_read_unsigned(info->xbdev->otherend,
+                                      "feature-persistent", 0);
 
        indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
                                        "feature-max-indirect-segments", 0);
-       info->max_indirect_segments = min(indirect_segments,
-                                         xen_blkif_max_segments);
+       if (indirect_segments > xen_blkif_max_segments)
+               indirect_segments = xen_blkif_max_segments;
+       if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
+               indirect_segments = 0;
+       info->max_indirect_segments = indirect_segments;
 }
 
 /*
@@ -2652,6 +2655,9 @@ static int __init xlblk_init(void)
        if (!xen_domain())
                return -ENODEV;
 
+       if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
+               xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
+
        if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
                pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
                        xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
index 6ce5ce8be2f2dda80a52790ee611106097fabdbe..87fba424817e509e6d87bc931079b5a34ffcb706 100644 (file)
@@ -92,7 +92,6 @@ static void add_early_randomness(struct hwrng *rng)
        mutex_unlock(&reading_mutex);
        if (bytes_read > 0)
                add_device_randomness(rng_buffer, bytes_read);
-       memset(rng_buffer, 0, size);
 }
 
 static inline void cleanup_rng(struct kref *kref)
@@ -288,7 +287,6 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
                }
        }
 out:
-       memset(rng_buffer, 0, rng_buffer_size());
        return ret ? : err;
 
 out_unlock_reading:
@@ -427,7 +425,6 @@ static int hwrng_fillfn(void *unused)
                /* Outside lock, sure, but y'know: randomness. */
                add_hwgenerator_randomness((void *)rng_fillbuf, rc,
                                           rc * current_quality * 8 >> 10);
-               memset(rng_fillbuf, 0, rng_buffer_size());
        }
        hwrng_fill = NULL;
        return 0;
index 4866f7aa32e65a84c1bc738a9d385e16aa312cd4..3356ab821624c9f41f1e84560e96b47b2b6695c2 100644 (file)
@@ -5,6 +5,10 @@ config CLKSRC_OF
        bool
        select CLKSRC_PROBE
 
+config CLKEVT_OF
+       bool
+       select CLKEVT_PROBE
+
 config CLKSRC_ACPI
        bool
        select CLKSRC_PROBE
@@ -12,6 +16,9 @@ config CLKSRC_ACPI
 config CLKSRC_PROBE
        bool
 
+config CLKEVT_PROBE
+       bool
+
 config CLKSRC_I8253
        bool
 
@@ -60,6 +67,16 @@ config DW_APB_TIMER_OF
        select DW_APB_TIMER
        select CLKSRC_OF
 
+config GEMINI_TIMER
+       bool "Cortina Gemini timer driver" if COMPILE_TEST
+       depends on GENERIC_CLOCKEVENTS
+       depends on HAS_IOMEM
+       select CLKSRC_MMIO
+       select CLKSRC_OF
+       select MFD_SYSCON
+       help
+         Enables support for the Gemini timer
+
 config ROCKCHIP_TIMER
        bool "Rockchip timer driver" if COMPILE_TEST
        depends on ARM || ARM64
@@ -325,16 +342,30 @@ config ARM_ARCH_TIMER_EVTSTREAM
          This must be disabled for hardware validation purposes to detect any
          hardware anomalies of missing events.
 
+config ARM_ARCH_TIMER_OOL_WORKAROUND
+       bool
+
 config FSL_ERRATUM_A008585
        bool "Workaround for Freescale/NXP Erratum A-008585"
        default y
        depends on ARM_ARCH_TIMER && ARM64
+       select ARM_ARCH_TIMER_OOL_WORKAROUND
        help
          This option enables a workaround for Freescale/NXP Erratum
          A-008585 ("ARM generic timer may contain an erroneous
          value").  The workaround will only be active if the
          fsl,erratum-a008585 property is found in the timer node.
 
+config HISILICON_ERRATUM_161010101
+       bool "Workaround for Hisilicon Erratum 161010101"
+       default y
+       select ARM_ARCH_TIMER_OOL_WORKAROUND
+       depends on ARM_ARCH_TIMER && ARM64
+       help
+         This option enables a workaround for Hisilicon Erratum
+         161010101. The workaround will be active if the hisilicon,erratum-161010101
+         property is found in the timer node.
+
 config ARM_GLOBAL_TIMER
        bool "Support for the ARM global timer" if COMPILE_TEST
        select CLKSRC_OF if OF
@@ -467,6 +498,13 @@ config SH_TIMER_MTU2
          Timer Pulse Unit 2 (MTU2) hardware available on SoCs from Renesas.
          This hardware comes with 16 bit-timer registers.
 
+config RENESAS_OSTM
+       bool "Renesas OSTM timer driver" if COMPILE_TEST
+       depends on GENERIC_CLOCKEVENTS
+       select CLKSRC_MMIO
+       help
+         Enables the support for the Renesas OSTM.
+
 config SH_TIMER_TMU
        bool "Renesas TMU timer driver" if COMPILE_TEST
        depends on GENERIC_CLOCKEVENTS
index a14111e1f0872d52df6799ea35ff29cdddeb7e36..d227d1314f14e58681de4e2e6a6ce44189b417da 100644 (file)
@@ -1,4 +1,5 @@
 obj-$(CONFIG_CLKSRC_PROBE)     += clksrc-probe.o
+obj-$(CONFIG_CLKEVT_PROBE)     += clkevt-probe.o
 obj-$(CONFIG_ATMEL_PIT)                += timer-atmel-pit.o
 obj-$(CONFIG_ATMEL_ST)         += timer-atmel-st.o
 obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
@@ -8,6 +9,7 @@ obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC)    += cs5535-clockevt.o
 obj-$(CONFIG_CLKSRC_JCORE_PIT)         += jcore-pit.o
 obj-$(CONFIG_SH_TIMER_CMT)     += sh_cmt.o
 obj-$(CONFIG_SH_TIMER_MTU2)    += sh_mtu2.o
+obj-$(CONFIG_RENESAS_OSTM)     += renesas-ostm.o
 obj-$(CONFIG_SH_TIMER_TMU)     += sh_tmu.o
 obj-$(CONFIG_EM_TIMER_STI)     += em_sti.o
 obj-$(CONFIG_CLKBLD_I8253)     += i8253.o
@@ -15,6 +17,7 @@ obj-$(CONFIG_CLKSRC_MMIO)     += mmio.o
 obj-$(CONFIG_DIGICOLOR_TIMER)  += timer-digicolor.o
 obj-$(CONFIG_DW_APB_TIMER)     += dw_apb_timer.o
 obj-$(CONFIG_DW_APB_TIMER_OF)  += dw_apb_timer_of.o
+obj-$(CONFIG_GEMINI_TIMER)     += timer-gemini.o
 obj-$(CONFIG_ROCKCHIP_TIMER)      += rockchip_timer.o
 obj-$(CONFIG_CLKSRC_NOMADIK_MTU)       += nomadik-mtu.o
 obj-$(CONFIG_CLKSRC_DBX500_PRCMU)      += clksrc-dbx500-prcmu.o
index 4c8c3fb2e8b248b3335d1c4f582f7faab1121d37..93aa1364376ac8d94145b7dca83240413d080517 100644 (file)
@@ -96,41 +96,107 @@ early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
  */
 
 #ifdef CONFIG_FSL_ERRATUM_A008585
-DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
-EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);
-
-static int fsl_a008585_enable = -1;
-
-static int __init early_fsl_a008585_cfg(char *buf)
+/*
+ * The number of retries is an arbitrary value well beyond the highest number
+ * of iterations the loop has been observed to take.
+ */
+#define __fsl_a008585_read_reg(reg) ({                 \
+       u64 _old, _new;                                 \
+       int _retries = 200;                             \
+                                                       \
+       do {                                            \
+               _old = read_sysreg(reg);                \
+               _new = read_sysreg(reg);                \
+               _retries--;                             \
+       } while (unlikely(_old != _new) && _retries);   \
+                                                       \
+       WARN_ON_ONCE(!_retries);                        \
+       _new;                                           \
+})
+
+static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
 {
-       int ret;
-       bool val;
+       return __fsl_a008585_read_reg(cntp_tval_el0);
+}
 
-       ret = strtobool(buf, &val);
-       if (ret)
-               return ret;
+static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
+{
+       return __fsl_a008585_read_reg(cntv_tval_el0);
+}
 
-       fsl_a008585_enable = val;
-       return 0;
+static u64 notrace fsl_a008585_read_cntvct_el0(void)
+{
+       return __fsl_a008585_read_reg(cntvct_el0);
 }
-early_param("clocksource.arm_arch_timer.fsl-a008585", early_fsl_a008585_cfg);
+#endif
 
-u32 __fsl_a008585_read_cntp_tval_el0(void)
+#ifdef CONFIG_HISILICON_ERRATUM_161010101
+/*
+ * Verify whether the value of the second read is larger than the first by
+ * less than 32 is the only way to confirm the value is correct, so clear the
+ * lower 5 bits to check whether the difference is greater than 32 or not.
+ * Theoretically the erratum should not occur more than twice in succession
+ * when reading the system counter, but it is possible that some interrupts
+ * may lead to more than twice read errors, triggering the warning, so setting
+ * the number of retries far beyond the number of iterations the loop has been
+ * observed to take.
+ */
+#define __hisi_161010101_read_reg(reg) ({                              \
+       u64 _old, _new;                                         \
+       int _retries = 50;                                      \
+                                                               \
+       do {                                                    \
+               _old = read_sysreg(reg);                        \
+               _new = read_sysreg(reg);                        \
+               _retries--;                                     \
+       } while (unlikely((_new - _old) >> 5) && _retries);     \
+                                                               \
+       WARN_ON_ONCE(!_retries);                                \
+       _new;                                                   \
+})
+
+static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
 {
-       return __fsl_a008585_read_reg(cntp_tval_el0);
+       return __hisi_161010101_read_reg(cntp_tval_el0);
 }
 
-u32 __fsl_a008585_read_cntv_tval_el0(void)
+static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
 {
-       return __fsl_a008585_read_reg(cntv_tval_el0);
+       return __hisi_161010101_read_reg(cntv_tval_el0);
 }
 
-u64 __fsl_a008585_read_cntvct_el0(void)
+static u64 notrace hisi_161010101_read_cntvct_el0(void)
 {
-       return __fsl_a008585_read_reg(cntvct_el0);
+       return __hisi_161010101_read_reg(cntvct_el0);
 }
-EXPORT_SYMBOL(__fsl_a008585_read_cntvct_el0);
-#endif /* CONFIG_FSL_ERRATUM_A008585 */
+#endif
+
+#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
+const struct arch_timer_erratum_workaround *timer_unstable_counter_workaround = NULL;
+EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
+
+DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
+EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);
+
+static const struct arch_timer_erratum_workaround ool_workarounds[] = {
+#ifdef CONFIG_FSL_ERRATUM_A008585
+       {
+               .id = "fsl,erratum-a008585",
+               .read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
+               .read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
+               .read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
+       },
+#endif
+#ifdef CONFIG_HISILICON_ERRATUM_161010101
+       {
+               .id = "hisilicon,erratum-161010101",
+               .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
+               .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
+               .read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
+       },
+#endif
+};
+#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
 
 static __always_inline
 void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
@@ -281,8 +347,8 @@ static __always_inline void set_next_event(const int access, unsigned long evt,
        arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
 }
 
-#ifdef CONFIG_FSL_ERRATUM_A008585
-static __always_inline void fsl_a008585_set_next_event(const int access,
+#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
+static __always_inline void erratum_set_next_event_generic(const int access,
                unsigned long evt, struct clock_event_device *clk)
 {
        unsigned long ctrl;
@@ -300,20 +366,20 @@ static __always_inline void fsl_a008585_set_next_event(const int access,
        arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
 }
 
-static int fsl_a008585_set_next_event_virt(unsigned long evt,
+static int erratum_set_next_event_virt(unsigned long evt,
                                           struct clock_event_device *clk)
 {
-       fsl_a008585_set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
+       erratum_set_next_event_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
        return 0;
 }
 
-static int fsl_a008585_set_next_event_phys(unsigned long evt,
+static int erratum_set_next_event_phys(unsigned long evt,
                                           struct clock_event_device *clk)
 {
-       fsl_a008585_set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
+       erratum_set_next_event_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
        return 0;
 }
-#endif /* CONFIG_FSL_ERRATUM_A008585 */
+#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
 
 static int arch_timer_set_next_event_virt(unsigned long evt,
                                          struct clock_event_device *clk)
@@ -343,16 +409,16 @@ static int arch_timer_set_next_event_phys_mem(unsigned long evt,
        return 0;
 }
 
-static void fsl_a008585_set_sne(struct clock_event_device *clk)
+static void erratum_workaround_set_sne(struct clock_event_device *clk)
 {
-#ifdef CONFIG_FSL_ERRATUM_A008585
+#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
        if (!static_branch_unlikely(&arch_timer_read_ool_enabled))
                return;
 
        if (arch_timer_uses_ppi == VIRT_PPI)
-               clk->set_next_event = fsl_a008585_set_next_event_virt;
+               clk->set_next_event = erratum_set_next_event_virt;
        else
-               clk->set_next_event = fsl_a008585_set_next_event_phys;
+               clk->set_next_event = erratum_set_next_event_phys;
 #endif
 }
 
@@ -385,7 +451,7 @@ static void __arch_timer_setup(unsigned type,
                        BUG();
                }
 
-               fsl_a008585_set_sne(clk);
+               erratum_workaround_set_sne(clk);
        } else {
                clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
                clk->name = "arch_mem_timer";
@@ -580,7 +646,7 @@ static struct clocksource clocksource_counter = {
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-static struct cyclecounter cyclecounter = {
+static struct cyclecounter cyclecounter __ro_after_init = {
        .read   = arch_counter_read_cc,
        .mask   = CLOCKSOURCE_MASK(56),
 };
@@ -605,7 +671,7 @@ static void __init arch_counter_register(unsigned type)
 
                clocksource_counter.archdata.vdso_direct = true;
 
-#ifdef CONFIG_FSL_ERRATUM_A008585
+#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
                /*
                 * Don't use the vdso fastpath if errata require using
                 * the out-of-line counter accessor.
@@ -893,12 +959,15 @@ static int __init arch_timer_of_init(struct device_node *np)
 
        arch_timer_c3stop = !of_property_read_bool(np, "always-on");
 
-#ifdef CONFIG_FSL_ERRATUM_A008585
-       if (fsl_a008585_enable < 0)
-               fsl_a008585_enable = of_property_read_bool(np, "fsl,erratum-a008585");
-       if (fsl_a008585_enable) {
-               static_branch_enable(&arch_timer_read_ool_enabled);
-               pr_info("Enabling workaround for FSL erratum A-008585\n");
+#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
+       for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
+               if (of_property_read_bool(np, ool_workarounds[i].id)) {
+                       timer_unstable_counter_workaround = &ool_workarounds[i];
+                       static_branch_enable(&arch_timer_read_ool_enabled);
+                       pr_info("arch_timer: Enabling workaround for %s\n",
+                               timer_unstable_counter_workaround->id);
+                       break;
+               }
        }
 #endif
 
diff --git a/drivers/clocksource/clkevt-probe.c b/drivers/clocksource/clkevt-probe.c
new file mode 100644 (file)
index 0000000..8c30fec
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2016, Linaro Ltd.  All rights reserved.
+ * Daniel Lezcano <daniel.lezcano@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/clockchip.h>
+
+extern struct of_device_id __clkevt_of_table[];
+
+static const struct of_device_id __clkevt_of_table_sentinel
+       __used __section(__clkevt_of_table_end);
+
+int __init clockevent_probe(void)
+{
+       struct device_node *np;
+       const struct of_device_id *match;
+       of_init_fn_1_ret init_func;
+       int ret, clockevents = 0;
+
+       for_each_matching_node_and_match(np, __clkevt_of_table, &match) {
+               if (!of_device_is_available(np))
+                       continue;
+
+               init_func = match->data;
+
+               ret = init_func(np);
+               if (ret) {
+                       pr_warn("Failed to initialize '%s' (%d)\n",
+                               np->name, ret);
+                       continue;
+               }
+
+               clockevents++;
+       }
+
+       if (!clockevents) {
+               pr_crit("%s: no matching clockevent found\n", __func__);
+               return -ENODEV;
+       }
+
+       return 0;
+}
diff --git a/drivers/clocksource/renesas-ostm.c b/drivers/clocksource/renesas-ostm.c
new file mode 100644 (file)
index 0000000..c76f576
--- /dev/null
@@ -0,0 +1,265 @@
+/*
+ * Renesas Timer Support - OSTM
+ *
+ * Copyright (C) 2017 Renesas Electronics America, Inc.
+ * Copyright (C) 2017 Chris Brandt
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+
+/*
+ * The OSTM contains independent channels.
+ * The first OSTM channel probed will be set up as a free running
+ * clocksource. Additionally we will use this clocksource for the system
+ * schedule timer sched_clock().
+ *
+ * The second (or more) channel probed will be set up as an interrupt
+ * driven clock event.
+ */
+
+struct ostm_device {
+       void __iomem *base;
+       unsigned long ticks_per_jiffy;
+       struct clock_event_device ced;
+};
+
+static void __iomem *system_clock;     /* For sched_clock() */
+
+/* OSTM REGISTERS */
+#define        OSTM_CMP                0x000   /* RW,32 */
+#define        OSTM_CNT                0x004   /* R,32 */
+#define        OSTM_TE                 0x010   /* R,8 */
+#define        OSTM_TS                 0x014   /* W,8 */
+#define        OSTM_TT                 0x018   /* W,8 */
+#define        OSTM_CTL                0x020   /* RW,8 */
+
+#define        TE                      0x01
+#define        TS                      0x01
+#define        TT                      0x01
+#define        CTL_PERIODIC            0x00
+#define        CTL_ONESHOT             0x02
+#define        CTL_FREERUN             0x02
+
+static struct ostm_device *ced_to_ostm(struct clock_event_device *ced)
+{
+       return container_of(ced, struct ostm_device, ced);
+}
+
+static void ostm_timer_stop(struct ostm_device *ostm)
+{
+       if (readb(ostm->base + OSTM_TE) & TE) {
+               writeb(TT, ostm->base + OSTM_TT);
+
+               /*
+                * Read back the register simply to confirm the write operation
+                * has completed since I/O writes can sometimes get queued by
+                * the bus architecture.
+                */
+               while (readb(ostm->base + OSTM_TE) & TE)
+                       ;
+       }
+}
+
+static int __init ostm_init_clksrc(struct ostm_device *ostm, unsigned long rate)
+{
+       /*
+        * irq not used (clock sources don't use interrupts)
+        */
+
+       ostm_timer_stop(ostm);
+
+       writel(0, ostm->base + OSTM_CMP);
+       writeb(CTL_FREERUN, ostm->base + OSTM_CTL);
+       writeb(TS, ostm->base + OSTM_TS);
+
+       return clocksource_mmio_init(ostm->base + OSTM_CNT,
+                       "ostm", rate,
+                       300, 32, clocksource_mmio_readl_up);
+}
+
+static u64 notrace ostm_read_sched_clock(void)
+{
+       return readl(system_clock);
+}
+
+static void __init ostm_init_sched_clock(struct ostm_device *ostm,
+                       unsigned long rate)
+{
+       system_clock = ostm->base + OSTM_CNT;
+       sched_clock_register(ostm_read_sched_clock, 32, rate);
+}
+
+static int ostm_clock_event_next(unsigned long delta,
+                                    struct clock_event_device *ced)
+{
+       struct ostm_device *ostm = ced_to_ostm(ced);
+
+       ostm_timer_stop(ostm);
+
+       writel(delta, ostm->base + OSTM_CMP);
+       writeb(CTL_ONESHOT, ostm->base + OSTM_CTL);
+       writeb(TS, ostm->base + OSTM_TS);
+
+       return 0;
+}
+
+static int ostm_shutdown(struct clock_event_device *ced)
+{
+       struct ostm_device *ostm = ced_to_ostm(ced);
+
+       ostm_timer_stop(ostm);
+
+       return 0;
+}
+static int ostm_set_periodic(struct clock_event_device *ced)
+{
+       struct ostm_device *ostm = ced_to_ostm(ced);
+
+       if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
+               ostm_timer_stop(ostm);
+
+       writel(ostm->ticks_per_jiffy - 1, ostm->base + OSTM_CMP);
+       writeb(CTL_PERIODIC, ostm->base + OSTM_CTL);
+       writeb(TS, ostm->base + OSTM_TS);
+
+       return 0;
+}
+
+static int ostm_set_oneshot(struct clock_event_device *ced)
+{
+       struct ostm_device *ostm = ced_to_ostm(ced);
+
+       ostm_timer_stop(ostm);
+
+       return 0;
+}
+
+static irqreturn_t ostm_timer_interrupt(int irq, void *dev_id)
+{
+       struct ostm_device *ostm = dev_id;
+
+       if (clockevent_state_oneshot(&ostm->ced))
+               ostm_timer_stop(ostm);
+
+       /* notify clockevent layer */
+       if (ostm->ced.event_handler)
+               ostm->ced.event_handler(&ostm->ced);
+
+       return IRQ_HANDLED;
+}
+
+static int __init ostm_init_clkevt(struct ostm_device *ostm, int irq,
+                       unsigned long rate)
+{
+       struct clock_event_device *ced = &ostm->ced;
+       int ret = -ENXIO;
+
+       ret = request_irq(irq, ostm_timer_interrupt,
+                         IRQF_TIMER | IRQF_IRQPOLL,
+                         "ostm", ostm);
+       if (ret) {
+               pr_err("ostm: failed to request irq\n");
+               return ret;
+       }
+
+       ced->name = "ostm";
+       ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC;
+       ced->set_state_shutdown = ostm_shutdown;
+       ced->set_state_periodic = ostm_set_periodic;
+       ced->set_state_oneshot = ostm_set_oneshot;
+       ced->set_next_event = ostm_clock_event_next;
+       ced->shift = 32;
+       ced->rating = 300;
+       ced->cpumask = cpumask_of(0);
+       clockevents_config_and_register(ced, rate, 0xf, 0xffffffff);
+
+       return 0;
+}
+
+static int __init ostm_init(struct device_node *np)
+{
+       struct ostm_device *ostm;
+       int ret = -EFAULT;
+       struct clk *ostm_clk = NULL;
+       int irq;
+       unsigned long rate;
+
+       ostm = kzalloc(sizeof(*ostm), GFP_KERNEL);
+       if (!ostm)
+               return -ENOMEM;
+
+       ostm->base = of_iomap(np, 0);
+       if (!ostm->base) {
+               pr_err("ostm: failed to remap I/O memory\n");
+               goto err;
+       }
+
+       irq = irq_of_parse_and_map(np, 0);
+       if (irq < 0) {
+               pr_err("ostm: Failed to get irq\n");
+               goto err;
+       }
+
+       ostm_clk = of_clk_get(np, 0);
+       if (IS_ERR(ostm_clk)) {
+               pr_err("ostm: Failed to get clock\n");
+               ostm_clk = NULL;
+               goto err;
+       }
+
+       ret = clk_prepare_enable(ostm_clk);
+       if (ret) {
+               pr_err("ostm: Failed to enable clock\n");
+               goto err;
+       }
+
+       rate = clk_get_rate(ostm_clk);
+       ostm->ticks_per_jiffy = (rate + HZ / 2) / HZ;
+
+       /*
+        * First probed device will be used as system clocksource. Any
+        * additional devices will be used as clock events.
+        */
+       if (!system_clock) {
+               ret = ostm_init_clksrc(ostm, rate);
+
+               if (!ret) {
+                       ostm_init_sched_clock(ostm, rate);
+                       pr_info("ostm: used for clocksource\n");
+               }
+
+       } else {
+               ret = ostm_init_clkevt(ostm, irq, rate);
+
+               if (!ret)
+                       pr_info("ostm: used for clock events\n");
+       }
+
+err:
+       if (ret) {
+               clk_disable_unprepare(ostm_clk);
+               iounmap(ostm->base);
+               kfree(ostm);
+               return ret;
+       }
+
+       return 0;
+}
+
+CLOCKSOURCE_OF_DECLARE(ostm, "renesas,ostm", ostm_init);
index d4ca9962a7595a0206710a0dd4a95656f426ae8e..745844ee973e1deda08203725d9b9d1b8e412972 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/atmel_tc.h>
+#include <linux/sched_clock.h>
 
 
 /*
@@ -56,11 +57,16 @@ static u64 tc_get_cycles(struct clocksource *cs)
        return (upper << 16) | lower;
 }
 
-static u64 tc_get_cycles32(struct clocksource *cs)
+static u32 tc_get_cv32(void)
 {
        return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
 }
 
+static u64 tc_get_cycles32(struct clocksource *cs)
+{
+       return tc_get_cv32();
+}
+
 static struct clocksource clksrc = {
        .name           = "tcb_clksrc",
        .rating         = 200,
@@ -69,6 +75,11 @@ static struct clocksource clksrc = {
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
+static u64 notrace tc_read_sched_clock(void)
+{
+       return tc_get_cv32();
+}
+
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 
 struct tc_clkevt_device {
@@ -339,6 +350,9 @@ static int __init tcb_clksrc_init(void)
                clksrc.read = tc_get_cycles32;
                /* setup ony channel 0 */
                tcb_setup_single_chan(tc, best_divisor_idx);
+
+               /* register sched_clock on chips with single 32 bit counter */
+               sched_clock_register(tc_read_sched_clock, 32, divided_rate);
        } else {
                /* tclib will give us three clocks no matter what the
                 * underlying platform supports.
diff --git a/drivers/clocksource/timer-gemini.c b/drivers/clocksource/timer-gemini.c
new file mode 100644 (file)
index 0000000..dda27b7
--- /dev/null
@@ -0,0 +1,277 @@
+/*
+ * Gemini timer driver
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on a rewrite of arch/arm/mach-gemini/timer.c:
+ * Copyright (C) 2001-2006 Storlink, Corp.
+ * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+ */
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/sched_clock.h>
+
+/*
+ * Relevant registers in the global syscon
+ */
+#define GLOBAL_STATUS          0x04
+#define CPU_AHB_RATIO_MASK     (0x3 << 18)
+#define CPU_AHB_1_1            (0x0 << 18)
+#define CPU_AHB_3_2            (0x1 << 18)
+#define CPU_AHB_24_13          (0x2 << 18)
+#define CPU_AHB_2_1            (0x3 << 18)
+#define REG_TO_AHB_SPEED(reg)  ((((reg) >> 15) & 0x7) * 10 + 130)
+
+/*
+ * Register definitions for the timers
+ */
+#define TIMER1_COUNT           (0x00)
+#define TIMER1_LOAD            (0x04)
+#define TIMER1_MATCH1          (0x08)
+#define TIMER1_MATCH2          (0x0c)
+#define TIMER2_COUNT           (0x10)
+#define TIMER2_LOAD            (0x14)
+#define TIMER2_MATCH1          (0x18)
+#define TIMER2_MATCH2          (0x1c)
+#define TIMER3_COUNT           (0x20)
+#define TIMER3_LOAD            (0x24)
+#define TIMER3_MATCH1          (0x28)
+#define TIMER3_MATCH2          (0x2c)
+#define TIMER_CR               (0x30)
+#define TIMER_INTR_STATE       (0x34)
+#define TIMER_INTR_MASK                (0x38)
+
+#define TIMER_1_CR_ENABLE      (1 << 0)
+#define TIMER_1_CR_CLOCK       (1 << 1)
+#define TIMER_1_CR_INT         (1 << 2)
+#define TIMER_2_CR_ENABLE      (1 << 3)
+#define TIMER_2_CR_CLOCK       (1 << 4)
+#define TIMER_2_CR_INT         (1 << 5)
+#define TIMER_3_CR_ENABLE      (1 << 6)
+#define TIMER_3_CR_CLOCK       (1 << 7)
+#define TIMER_3_CR_INT         (1 << 8)
+#define TIMER_1_CR_UPDOWN      (1 << 9)
+#define TIMER_2_CR_UPDOWN      (1 << 10)
+#define TIMER_3_CR_UPDOWN      (1 << 11)
+#define TIMER_DEFAULT_FLAGS    (TIMER_1_CR_UPDOWN | \
+                                TIMER_3_CR_ENABLE | \
+                                TIMER_3_CR_UPDOWN)
+
+#define TIMER_1_INT_MATCH1     (1 << 0)
+#define TIMER_1_INT_MATCH2     (1 << 1)
+#define TIMER_1_INT_OVERFLOW   (1 << 2)
+#define TIMER_2_INT_MATCH1     (1 << 3)
+#define TIMER_2_INT_MATCH2     (1 << 4)
+#define TIMER_2_INT_OVERFLOW   (1 << 5)
+#define TIMER_3_INT_MATCH1     (1 << 6)
+#define TIMER_3_INT_MATCH2     (1 << 7)
+#define TIMER_3_INT_OVERFLOW   (1 << 8)
+#define TIMER_INT_ALL_MASK     0x1ff
+
+static unsigned int tick_rate;
+static void __iomem *base;
+
+static u64 notrace gemini_read_sched_clock(void)
+{
+       return readl(base + TIMER3_COUNT);
+}
+
+static int gemini_timer_set_next_event(unsigned long cycles,
+                                      struct clock_event_device *evt)
+{
+       u32 cr;
+
+       /* Setup the match register */
+       cr = readl(base + TIMER1_COUNT);
+       writel(cr + cycles, base + TIMER1_MATCH1);
+       if (readl(base + TIMER1_COUNT) - cr > cycles)
+               return -ETIME;
+
+       return 0;
+}
+
+static int gemini_timer_shutdown(struct clock_event_device *evt)
+{
+       u32 cr;
+
+       /*
+        * Disable also for oneshot: the set_next() call will arm the timer
+        * instead.
+        */
+       /* Stop timer and interrupt. */
+       cr = readl(base + TIMER_CR);
+       cr &= ~(TIMER_1_CR_ENABLE | TIMER_1_CR_INT);
+       writel(cr, base + TIMER_CR);
+
+       /* Setup counter start from 0 */
+       writel(0, base + TIMER1_COUNT);
+       writel(0, base + TIMER1_LOAD);
+
+       /* enable interrupt */
+       cr = readl(base + TIMER_INTR_MASK);
+       cr &= ~(TIMER_1_INT_OVERFLOW | TIMER_1_INT_MATCH2);
+       cr |= TIMER_1_INT_MATCH1;
+       writel(cr, base + TIMER_INTR_MASK);
+
+       /* start the timer */
+       cr = readl(base + TIMER_CR);
+       cr |= TIMER_1_CR_ENABLE;
+       writel(cr, base + TIMER_CR);
+
+       return 0;
+}
+
+static int gemini_timer_set_periodic(struct clock_event_device *evt)
+{
+       u32 period = DIV_ROUND_CLOSEST(tick_rate, HZ);
+       u32 cr;
+
+       /* Stop timer and interrupt */
+       cr = readl(base + TIMER_CR);
+       cr &= ~(TIMER_1_CR_ENABLE | TIMER_1_CR_INT);
+       writel(cr, base + TIMER_CR);
+
+       /* Setup timer to fire at 1/HT intervals. */
+       cr = 0xffffffff - (period - 1);
+       writel(cr, base + TIMER1_COUNT);
+       writel(cr, base + TIMER1_LOAD);
+
+       /* enable interrupt on overflow */
+       cr = readl(base + TIMER_INTR_MASK);
+       cr &= ~(TIMER_1_INT_MATCH1 | TIMER_1_INT_MATCH2);
+       cr |= TIMER_1_INT_OVERFLOW;
+       writel(cr, base + TIMER_INTR_MASK);
+
+       /* Start the timer */
+       cr = readl(base + TIMER_CR);
+       cr |= TIMER_1_CR_ENABLE;
+       cr |= TIMER_1_CR_INT;
+       writel(cr, base + TIMER_CR);
+
+       return 0;
+}
+
+/* Use TIMER1 as clock event */
+static struct clock_event_device gemini_clockevent = {
+       .name                   = "TIMER1",
+       /* Reasonably fast and accurate clock event */
+       .rating                 = 300,
+       .shift                  = 32,
+       .features               = CLOCK_EVT_FEAT_PERIODIC |
+                                 CLOCK_EVT_FEAT_ONESHOT,
+       .set_next_event         = gemini_timer_set_next_event,
+       .set_state_shutdown     = gemini_timer_shutdown,
+       .set_state_periodic     = gemini_timer_set_periodic,
+       .set_state_oneshot      = gemini_timer_shutdown,
+       .tick_resume            = gemini_timer_shutdown,
+};
+
+/*
+ * IRQ handler for the timer
+ */
+static irqreturn_t gemini_timer_interrupt(int irq, void *dev_id)
+{
+       struct clock_event_device *evt = &gemini_clockevent;
+
+       evt->event_handler(evt);
+       return IRQ_HANDLED;
+}
+
+static struct irqaction gemini_timer_irq = {
+       .name           = "Gemini Timer Tick",
+       .flags          = IRQF_TIMER,
+       .handler        = gemini_timer_interrupt,
+};
+
+static int __init gemini_timer_of_init(struct device_node *np)
+{
+       static struct regmap *map;
+       int irq;
+       int ret;
+       u32 val;
+
+       map = syscon_regmap_lookup_by_phandle(np, "syscon");
+       if (IS_ERR(map)) {
+               pr_err("Can't get regmap for syscon handle");
+               return -ENODEV;
+       }
+       ret = regmap_read(map, GLOBAL_STATUS, &val);
+       if (ret) {
+               pr_err("Can't read syscon status register");
+               return -ENXIO;
+       }
+
+       base = of_iomap(np, 0);
+       if (!base) {
+               pr_err("Can't remap registers");
+               return -ENXIO;
+       }
+       /* IRQ for timer 1 */
+       irq = irq_of_parse_and_map(np, 0);
+       if (irq <= 0) {
+               pr_err("Can't parse IRQ");
+               return -EINVAL;
+       }
+
+       tick_rate = REG_TO_AHB_SPEED(val) * 1000000;
+       printk(KERN_INFO "Bus: %dMHz", tick_rate / 1000000);
+
+       tick_rate /= 6;         /* APB bus run AHB*(1/6) */
+
+       switch (val & CPU_AHB_RATIO_MASK) {
+       case CPU_AHB_1_1:
+               printk(KERN_CONT "(1/1)\n");
+               break;
+       case CPU_AHB_3_2:
+               printk(KERN_CONT "(3/2)\n");
+               break;
+       case CPU_AHB_24_13:
+               printk(KERN_CONT "(24/13)\n");
+               break;
+       case CPU_AHB_2_1:
+               printk(KERN_CONT "(2/1)\n");
+               break;
+       }
+
+       /*
+        * Reset the interrupt mask and status
+        */
+       writel(TIMER_INT_ALL_MASK, base + TIMER_INTR_MASK);
+       writel(0, base + TIMER_INTR_STATE);
+       writel(TIMER_DEFAULT_FLAGS, base + TIMER_CR);
+
+       /*
+        * Setup free-running clocksource timer (interrupts
+        * disabled.)
+        */
+       writel(0, base + TIMER3_COUNT);
+       writel(0, base + TIMER3_LOAD);
+       writel(0, base + TIMER3_MATCH1);
+       writel(0, base + TIMER3_MATCH2);
+       clocksource_mmio_init(base + TIMER3_COUNT,
+                             "gemini_clocksource", tick_rate,
+                             300, 32, clocksource_mmio_readl_up);
+       sched_clock_register(gemini_read_sched_clock, 32, tick_rate);
+
+       /*
+        * Setup clockevent timer (interrupt-driven.)
+        */
+       writel(0, base + TIMER1_COUNT);
+       writel(0, base + TIMER1_LOAD);
+       writel(0, base + TIMER1_MATCH1);
+       writel(0, base + TIMER1_MATCH2);
+       setup_irq(irq, &gemini_timer_irq);
+       gemini_clockevent.cpumask = cpumask_of(0);
+       clockevents_config_and_register(&gemini_clockevent, tick_rate,
+                                       1, 0xffffffff);
+
+       return 0;
+}
+CLOCKSOURCE_OF_DECLARE(nomadik_mtu, "cortina,gemini-timer",
+                      gemini_timer_of_init);
index d8b164a7c4e517f8da42507ecaf979043dace3d8..4ebae43118effe98f4763618cbd0777060e8e134 100644 (file)
@@ -37,14 +37,6 @@ config CPU_FREQ_STAT
 
          If in doubt, say N.
 
-config CPU_FREQ_STAT_DETAILS
-       bool "CPU frequency transition statistics details"
-       depends on CPU_FREQ_STAT
-       help
-         Show detailed CPU frequency transition table in sysfs.
-
-         If in doubt, say N.
-
 choice
        prompt "Default CPUFreq governor"
        default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ
@@ -271,6 +263,16 @@ config IA64_ACPI_CPUFREQ
 endif
 
 if MIPS
+config BMIPS_CPUFREQ
+       tristate "BMIPS CPUfreq Driver"
+       help
+         This option adds a CPUfreq driver for BMIPS processors with
+         support for configurable CPU frequency.
+
+         For now, BMIPS5 chips are supported (such as the Broadcom 7425).
+
+         If in doubt, say N.
+
 config LOONGSON2_CPUFREQ
        tristate "Loongson2 CPUFreq Driver"
        help
@@ -332,7 +334,7 @@ endif
 
 config QORIQ_CPUFREQ
        tristate "CPU frequency scaling driver for Freescale QorIQ SoCs"
-       depends on OF && COMMON_CLK && (PPC_E500MC || ARM)
+       depends on OF && COMMON_CLK && (PPC_E500MC || ARM || ARM64)
        depends on !CPU_THERMAL || THERMAL
        select CLK_QORIQ
        help
index 920c469f3953e890bd747413750be8ad6a4352e2..74fa5c5904d388444f357bc7b7e9e1737a45fd29 100644 (file)
@@ -247,6 +247,17 @@ config ARM_TEGRA124_CPUFREQ
        help
          This adds the CPUFreq driver support for Tegra124 SOCs.
 
+config ARM_TI_CPUFREQ
+       bool "Texas Instruments CPUFreq support"
+       depends on ARCH_OMAP2PLUS
+       help
+         This driver enables valid OPPs on the running platform based on
+         values contained within the SoC in use. Enable this in order to
+         use the cpufreq-dt driver on all Texas Instruments platforms that
+         provide dt based operating-points-v2 tables with opp-supported-hw
+         data provided. Required for cpufreq support on AM335x, AM437x,
+         DRA7x, and AM57x platforms.
+
 config ARM_PXA2xx_CPUFREQ
        tristate "Intel PXA2xx CPUfreq driver"
        depends on PXA27x || PXA25x
@@ -257,7 +268,7 @@ config ARM_PXA2xx_CPUFREQ
 
 config ACPI_CPPC_CPUFREQ
        tristate "CPUFreq driver based on the ACPI CPPC spec"
-       depends on ACPI
+       depends on ACPI_PROCESSOR
        select ACPI_CPPC_LIB
        default n
        help
index 1e46c3918e7a630647d744549a46c184e5e8fd80..9f5a8045f36d37710c8196e8ddf1145817664f05 100644 (file)
@@ -77,6 +77,7 @@ obj-$(CONFIG_ARM_SPEAR_CPUFREQ)               += spear-cpufreq.o
 obj-$(CONFIG_ARM_STI_CPUFREQ)          += sti-cpufreq.o
 obj-$(CONFIG_ARM_TEGRA20_CPUFREQ)      += tegra20-cpufreq.o
 obj-$(CONFIG_ARM_TEGRA124_CPUFREQ)     += tegra124-cpufreq.o
+obj-$(CONFIG_ARM_TI_CPUFREQ)           += ti-cpufreq.o
 obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
 obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o
 obj-$(CONFIG_MACH_MVEBU_V7)            += mvebu-cpufreq.o
@@ -98,6 +99,7 @@ obj-$(CONFIG_POWERNV_CPUFREQ)         += powernv-cpufreq.o
 # Other platform drivers
 obj-$(CONFIG_AVR32_AT32AP_CPUFREQ)     += at32ap-cpufreq.o
 obj-$(CONFIG_BFIN_CPU_FREQ)            += blackfin-cpufreq.o
+obj-$(CONFIG_BMIPS_CPUFREQ)            += bmips-cpufreq.o
 obj-$(CONFIG_CRIS_MACH_ARTPEC3)                += cris-artpec3-cpufreq.o
 obj-$(CONFIG_ETRAXFS)                  += cris-etraxfs-cpufreq.o
 obj-$(CONFIG_IA64_ACPI_CPUFREQ)                += ia64-acpi-cpufreq.o
diff --git a/drivers/cpufreq/bmips-cpufreq.c b/drivers/cpufreq/bmips-cpufreq.c
new file mode 100644 (file)
index 0000000..1653151
--- /dev/null
@@ -0,0 +1,188 @@
+/*
+ * CPU frequency scaling for Broadcom BMIPS SoCs
+ *
+ * Copyright (c) 2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+/* for mips_hpt_frequency */
+#include <asm/time.h>
+
+#define BMIPS_CPUFREQ_PREFIX   "bmips"
+#define BMIPS_CPUFREQ_NAME     BMIPS_CPUFREQ_PREFIX "-cpufreq"
+
+#define TRANSITION_LATENCY     (25 * 1000)     /* 25 us */
+
+#define BMIPS5_CLK_DIV_SET_SHIFT       0x7
+#define BMIPS5_CLK_DIV_SHIFT           0x4
+#define BMIPS5_CLK_DIV_MASK            0xf
+
+enum bmips_type {
+       BMIPS5000,
+       BMIPS5200,
+};
+
+struct cpufreq_compat {
+       const char *compatible;
+       unsigned int bmips_type;
+       unsigned int clk_mult;
+       unsigned int max_freqs;
+};
+
+#define BMIPS(c, t, m, f) { \
+       .compatible = c, \
+       .bmips_type = (t), \
+       .clk_mult = (m), \
+       .max_freqs = (f), \
+}
+
+static struct cpufreq_compat bmips_cpufreq_compat[] = {
+       BMIPS("brcm,bmips5000", BMIPS5000, 8, 4),
+       BMIPS("brcm,bmips5200", BMIPS5200, 8, 4),
+       { }
+};
+
+static struct cpufreq_compat *priv;
+
+static int htp_freq_to_cpu_freq(unsigned int clk_mult)
+{
+       return mips_hpt_frequency * clk_mult / 1000;
+}
+
+static struct cpufreq_frequency_table *
+bmips_cpufreq_get_freq_table(const struct cpufreq_policy *policy)
+{
+       struct cpufreq_frequency_table *table;
+       unsigned long cpu_freq;
+       int i;
+
+       cpu_freq = htp_freq_to_cpu_freq(priv->clk_mult);
+
+       table = kmalloc((priv->max_freqs + 1) * sizeof(*table), GFP_KERNEL);
+       if (!table)
+               return ERR_PTR(-ENOMEM);
+
+       for (i = 0; i < priv->max_freqs; i++) {
+               table[i].frequency = cpu_freq / (1 << i);
+               table[i].driver_data = i;
+       }
+       table[i].frequency = CPUFREQ_TABLE_END;
+
+       return table;
+}
+
+static unsigned int bmips_cpufreq_get(unsigned int cpu)
+{
+       unsigned int div;
+       uint32_t mode;
+
+       switch (priv->bmips_type) {
+       case BMIPS5200:
+       case BMIPS5000:
+               mode = read_c0_brcm_mode();
+               div = ((mode >> BMIPS5_CLK_DIV_SHIFT) & BMIPS5_CLK_DIV_MASK);
+               break;
+       default:
+               div = 0;
+       }
+
+       return htp_freq_to_cpu_freq(priv->clk_mult) / (1 << div);
+}
+
+static int bmips_cpufreq_target_index(struct cpufreq_policy *policy,
+                                     unsigned int index)
+{
+       unsigned int div = policy->freq_table[index].driver_data;
+
+       switch (priv->bmips_type) {
+       case BMIPS5200:
+       case BMIPS5000:
+               change_c0_brcm_mode(BMIPS5_CLK_DIV_MASK << BMIPS5_CLK_DIV_SHIFT,
+                                   (1 << BMIPS5_CLK_DIV_SET_SHIFT) |
+                                   (div << BMIPS5_CLK_DIV_SHIFT));
+               break;
+       default:
+               return -ENOTSUPP;
+       }
+
+       return 0;
+}
+
+static int bmips_cpufreq_exit(struct cpufreq_policy *policy)
+{
+       kfree(policy->freq_table);
+
+       return 0;
+}
+
+static int bmips_cpufreq_init(struct cpufreq_policy *policy)
+{
+       struct cpufreq_frequency_table *freq_table;
+       int ret;
+
+       freq_table = bmips_cpufreq_get_freq_table(policy);
+       if (IS_ERR(freq_table)) {
+               ret = PTR_ERR(freq_table);
+               pr_err("%s: couldn't determine frequency table (%d).\n",
+                       BMIPS_CPUFREQ_NAME, ret);
+               return ret;
+       }
+
+       ret = cpufreq_generic_init(policy, freq_table, TRANSITION_LATENCY);
+       if (ret)
+               bmips_cpufreq_exit(policy);
+       else
+               pr_info("%s: registered\n", BMIPS_CPUFREQ_NAME);
+
+       return ret;
+}
+
+static struct cpufreq_driver bmips_cpufreq_driver = {
+       .flags          = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+       .verify         = cpufreq_generic_frequency_table_verify,
+       .target_index   = bmips_cpufreq_target_index,
+       .get            = bmips_cpufreq_get,
+       .init           = bmips_cpufreq_init,
+       .exit           = bmips_cpufreq_exit,
+       .attr           = cpufreq_generic_attr,
+       .name           = BMIPS_CPUFREQ_PREFIX,
+};
+
+static int __init bmips_cpufreq_probe(void)
+{
+       struct cpufreq_compat *cc;
+       struct device_node *np;
+
+       for (cc = bmips_cpufreq_compat; cc->compatible; cc++) {
+               np = of_find_compatible_node(NULL, "cpu", cc->compatible);
+               if (np) {
+                       of_node_put(np);
+                       priv = cc;
+                       break;
+               }
+       }
+
+       /* We hit the guard element of the array. No compatible CPU found. */
+       if (!cc->compatible)
+               return -ENODEV;
+
+       return cpufreq_register_driver(&bmips_cpufreq_driver);
+}
+device_initcall(bmips_cpufreq_probe);
+
+MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>");
+MODULE_DESCRIPTION("CPUfreq driver for Broadcom BMIPS SoCs");
+MODULE_LICENSE("GPL");
index 4fda623e55bb728e910ac0f0995cc7d8cb34f330..7281a2c19c362177a57a962f8d587ef40764b92e 100644 (file)
@@ -784,8 +784,19 @@ static int brcm_avs_target_index(struct cpufreq_policy *policy,
 static int brcm_avs_suspend(struct cpufreq_policy *policy)
 {
        struct private_data *priv = policy->driver_data;
+       int ret;
+
+       ret = brcm_avs_get_pmap(priv, &priv->pmap);
+       if (ret)
+               return ret;
 
-       return brcm_avs_get_pmap(priv, &priv->pmap);
+       /*
+        * We can't use the P-state returned by brcm_avs_get_pmap(), since
+        * that's the initial P-state from when the P-map was downloaded to the
+        * AVS co-processor, not necessarily the P-state we are running at now.
+        * So, we get the current P-state explicitly.
+        */
+       return brcm_avs_get_pstate(priv, &priv->pmap.state);
 }
 
 static int brcm_avs_resume(struct cpufreq_policy *policy)
@@ -867,7 +878,6 @@ unmap_intr_base:
        iounmap(priv->avs_intr_base);
 unmap_base:
        iounmap(priv->base);
-       platform_set_drvdata(pdev, NULL);
 
        return ret;
 }
@@ -954,9 +964,9 @@ static ssize_t show_brcm_avs_pmap(struct cpufreq_policy *policy, char *buf)
        brcm_avs_parse_p1(pmap.p1, &mdiv_p0, &pdiv, &ndiv);
        brcm_avs_parse_p2(pmap.p2, &mdiv_p1, &mdiv_p2, &mdiv_p3, &mdiv_p4);
 
-       return sprintf(buf, "0x%08x 0x%08x %u %u %u %u %u %u %u\n",
+       return sprintf(buf, "0x%08x 0x%08x %u %u %u %u %u %u %u %u %u\n",
                pmap.p1, pmap.p2, ndiv, pdiv, mdiv_p0, mdiv_p1, mdiv_p2,
-               mdiv_p3, mdiv_p4);
+               mdiv_p3, mdiv_p4, pmap.mode, pmap.state);
 }
 
 static ssize_t show_brcm_avs_voltage(struct cpufreq_policy *policy, char *buf)
@@ -1031,7 +1041,6 @@ static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
        priv = platform_get_drvdata(pdev);
        iounmap(priv->base);
        iounmap(priv->avs_intr_base);
-       platform_set_drvdata(pdev, NULL);
 
        return 0;
 }
index 7fcaf26e8f819b7665f6e9bf83c07a1b705b1d65..921b4a6c3d16bece3177b1e407883c8df9bcfa4a 100644 (file)
@@ -87,8 +87,6 @@ static const struct of_device_id machines[] __initconst = {
        { .compatible = "socionext,uniphier-ld11", },
        { .compatible = "socionext,uniphier-ld20", },
 
-       { .compatible = "ti,am33xx", },
-       { .compatible = "ti,dra7", },
        { .compatible = "ti,omap2", },
        { .compatible = "ti,omap3", },
        { .compatible = "ti,omap4", },
index cc475eff90b3e5a8964d3a3a28c5ad334657788b..a475432818642fee4547699011ba4cf5aa619f3a 100644 (file)
@@ -132,7 +132,7 @@ static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
        u64 cur_wall_time;
        u64 busy_time;
 
-       cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+       cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());
 
        busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
@@ -143,9 +143,9 @@ static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
 
        idle_time = cur_wall_time - busy_time;
        if (wall)
-               *wall = cputime_to_usecs(cur_wall_time);
+               *wall = div_u64(cur_wall_time, NSEC_PER_USEC);
 
-       return cputime_to_usecs(idle_time);
+       return div_u64(idle_time, NSEC_PER_USEC);
 }
 
 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
@@ -1078,15 +1078,11 @@ err_free_policy:
        return NULL;
 }
 
-static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
+static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
 {
        struct kobject *kobj;
        struct completion *cmp;
 
-       if (notify)
-               blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
-                                            CPUFREQ_REMOVE_POLICY, policy);
-
        down_write(&policy->rwsem);
        cpufreq_stats_free_table(policy);
        kobj = &policy->kobj;
@@ -1104,7 +1100,7 @@ static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
        pr_debug("wait complete\n");
 }
 
-static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
+static void cpufreq_policy_free(struct cpufreq_policy *policy)
 {
        unsigned long flags;
        int cpu;
@@ -1117,7 +1113,7 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
                per_cpu(cpufreq_cpu_data, cpu) = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-       cpufreq_policy_put_kobj(policy, notify);
+       cpufreq_policy_put_kobj(policy);
        free_cpumask_var(policy->real_cpus);
        free_cpumask_var(policy->related_cpus);
        free_cpumask_var(policy->cpus);
@@ -1170,8 +1166,6 @@ static int cpufreq_online(unsigned int cpu)
        if (new_policy) {
                /* related_cpus should at least include policy->cpus. */
                cpumask_copy(policy->related_cpus, policy->cpus);
-               /* Clear mask of registered CPUs */
-               cpumask_clear(policy->real_cpus);
        }
 
        /*
@@ -1244,17 +1238,12 @@ static int cpufreq_online(unsigned int cpu)
                        goto out_exit_policy;
 
                cpufreq_stats_create_table(policy);
-               blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
-                               CPUFREQ_CREATE_POLICY, policy);
 
                write_lock_irqsave(&cpufreq_driver_lock, flags);
                list_add(&policy->policy_list, &cpufreq_policy_list);
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
        }
 
-       blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
-                                    CPUFREQ_START, policy);
-
        ret = cpufreq_init_policy(policy);
        if (ret) {
                pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
@@ -1282,7 +1271,7 @@ out_exit_policy:
        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);
 out_free_policy:
-       cpufreq_policy_free(policy, !new_policy);
+       cpufreq_policy_free(policy);
        return ret;
 }
 
@@ -1403,7 +1392,7 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
        remove_cpu_dev_symlink(policy, dev);
 
        if (cpumask_empty(policy->real_cpus))
-               cpufreq_policy_free(policy, true);
+               cpufreq_policy_free(policy);
 }
 
 /**
index 0196467280bd0baf85d5380434688e1e5c621523..631bd2c86c5e6e996e157b957754891936cb1c7a 100644 (file)
@@ -152,7 +152,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
                if (ignore_nice) {
                        u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 
-                       idle_time += cputime_to_usecs(cur_nice - j_cdbs->prev_cpu_nice);
+                       idle_time += div_u64(cur_nice - j_cdbs->prev_cpu_nice, NSEC_PER_USEC);
                        j_cdbs->prev_cpu_nice = cur_nice;
                }
 
index ac284e66839c6f19c9156432c6f5c4995e6525fb..f570ead624547e111d74911e722b31aabcfce117 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/cpufreq.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/cputime.h>
 
 static DEFINE_SPINLOCK(cpufreq_stats_lock);
 
@@ -25,9 +24,7 @@ struct cpufreq_stats {
        unsigned int last_index;
        u64 *time_in_state;
        unsigned int *freq_table;
-#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
        unsigned int *trans_table;
-#endif
 };
 
 static int cpufreq_stats_update(struct cpufreq_stats *stats)
@@ -46,9 +43,7 @@ static void cpufreq_stats_clear_table(struct cpufreq_stats *stats)
        unsigned int count = stats->max_state;
 
        memset(stats->time_in_state, 0, count * sizeof(u64));
-#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
        memset(stats->trans_table, 0, count * count * sizeof(int));
-#endif
        stats->last_time = get_jiffies_64();
        stats->total_trans = 0;
 }
@@ -84,7 +79,6 @@ static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf,
        return count;
 }
 
-#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
 static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
 {
        struct cpufreq_stats *stats = policy->stats;
@@ -129,7 +123,6 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
        return len;
 }
 cpufreq_freq_attr_ro(trans_table);
-#endif
 
 cpufreq_freq_attr_ro(total_trans);
 cpufreq_freq_attr_ro(time_in_state);
@@ -139,9 +132,7 @@ static struct attribute *default_attrs[] = {
        &total_trans.attr,
        &time_in_state.attr,
        &reset.attr,
-#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
        &trans_table.attr,
-#endif
        NULL
 };
 static struct attribute_group stats_attr_group = {
@@ -200,9 +191,7 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy)
 
        alloc_size = count * sizeof(int) + count * sizeof(u64);
 
-#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
        alloc_size += count * count * sizeof(int);
-#endif
 
        /* Allocate memory for time_in_state/freq_table/trans_table in one go */
        stats->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
@@ -211,9 +200,7 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy)
 
        stats->freq_table = (unsigned int *)(stats->time_in_state + count);
 
-#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
        stats->trans_table = stats->freq_table + count;
-#endif
 
        stats->max_state = count;
 
@@ -259,8 +246,6 @@ void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
        cpufreq_stats_update(stats);
 
        stats->last_index = new_index;
-#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
        stats->trans_table[old_index * stats->max_state + new_index]++;
-#endif
        stats->total_trans++;
 }
index f91c25718d164c9d9339acf671d67937995fe076..eb0f7fb716858992f00bb4a02285b3d0189b1f07 100644 (file)
@@ -358,6 +358,8 @@ static struct pstate_funcs pstate_funcs __read_mostly;
 static int hwp_active __read_mostly;
 static bool per_cpu_limits __read_mostly;
 
+static bool driver_registered __read_mostly;
+
 #ifdef CONFIG_ACPI
 static bool acpi_ppc;
 #endif
@@ -394,6 +396,7 @@ static struct perf_limits *limits = &performance_limits;
 static struct perf_limits *limits = &powersave_limits;
 #endif
 
+static DEFINE_MUTEX(intel_pstate_driver_lock);
 static DEFINE_MUTEX(intel_pstate_limits_lock);
 
 #ifdef CONFIG_ACPI
@@ -538,7 +541,6 @@ static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
 
        acpi_processor_unregister_performance(policy->cpu);
 }
-
 #else
 static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
 {
@@ -873,7 +875,10 @@ static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
 
                rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
                hw_min = HWP_LOWEST_PERF(cap);
-               hw_max = HWP_HIGHEST_PERF(cap);
+               if (limits->no_turbo)
+                       hw_max = HWP_GUARANTEED_PERF(cap);
+               else
+                       hw_max = HWP_HIGHEST_PERF(cap);
                range = hw_max - hw_min;
 
                max_perf_pct = perf_limits->max_perf_pct;
@@ -887,11 +892,6 @@ static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
 
                adj_range = max_perf_pct * range / 100;
                max = hw_min + adj_range;
-               if (limits->no_turbo) {
-                       hw_max = HWP_GUARANTEED_PERF(cap);
-                       if (hw_max < max)
-                               max = hw_max;
-               }
 
                value &= ~HWP_MAX_PERF(~0L);
                value |= HWP_MAX_PERF(max);
@@ -1007,37 +1007,59 @@ static int pid_param_get(void *data, u64 *val)
 }
 DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");
 
+static struct dentry *debugfs_parent;
+
 struct pid_param {
        char *name;
        void *value;
+       struct dentry *dentry;
 };
 
 static struct pid_param pid_files[] = {
-       {"sample_rate_ms", &pid_params.sample_rate_ms},
-       {"d_gain_pct", &pid_params.d_gain_pct},
-       {"i_gain_pct", &pid_params.i_gain_pct},
-       {"deadband", &pid_params.deadband},
-       {"setpoint", &pid_params.setpoint},
-       {"p_gain_pct", &pid_params.p_gain_pct},
-       {NULL, NULL}
+       {"sample_rate_ms", &pid_params.sample_rate_ms},
+       {"d_gain_pct", &pid_params.d_gain_pct},
+       {"i_gain_pct", &pid_params.i_gain_pct},
+       {"deadband", &pid_params.deadband},
+       {"setpoint", &pid_params.setpoint},
+       {"p_gain_pct", &pid_params.p_gain_pct},
+       {NULL, NULL}
 };
 
-static void __init intel_pstate_debug_expose_params(void)
+static void intel_pstate_debug_expose_params(void)
 {
-       struct dentry *debugfs_parent;
-       int i = 0;
+       int i;
 
        debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
        if (IS_ERR_OR_NULL(debugfs_parent))
                return;
-       while (pid_files[i].name) {
-               debugfs_create_file(pid_files[i].name, 0660,
-                                   debugfs_parent, pid_files[i].value,
-                                   &fops_pid_param);
-               i++;
+
+       for (i = 0; pid_files[i].name; i++) {
+               struct dentry *dentry;
+
+               dentry = debugfs_create_file(pid_files[i].name, 0660,
+                                            debugfs_parent, pid_files[i].value,
+                                            &fops_pid_param);
+               if (!IS_ERR(dentry))
+                       pid_files[i].dentry = dentry;
        }
 }
 
+static void intel_pstate_debug_hide_params(void)
+{
+       int i;
+
+       if (IS_ERR_OR_NULL(debugfs_parent))
+               return;
+
+       for (i = 0; pid_files[i].name; i++) {
+               debugfs_remove(pid_files[i].dentry);
+               pid_files[i].dentry = NULL;
+       }
+
+       debugfs_remove(debugfs_parent);
+       debugfs_parent = NULL;
+}
+
 /************************** debugfs end ************************/
 
 /************************** sysfs begin ************************/
@@ -1048,6 +1070,34 @@ static void __init intel_pstate_debug_expose_params(void)
                return sprintf(buf, "%u\n", limits->object);            \
        }
 
+static ssize_t intel_pstate_show_status(char *buf);
+static int intel_pstate_update_status(const char *buf, size_t size);
+
+static ssize_t show_status(struct kobject *kobj,
+                          struct attribute *attr, char *buf)
+{
+       ssize_t ret;
+
+       mutex_lock(&intel_pstate_driver_lock);
+       ret = intel_pstate_show_status(buf);
+       mutex_unlock(&intel_pstate_driver_lock);
+
+       return ret;
+}
+
+static ssize_t store_status(struct kobject *a, struct attribute *b,
+                           const char *buf, size_t count)
+{
+       char *p = memchr(buf, '\n', count);
+       int ret;
+
+       mutex_lock(&intel_pstate_driver_lock);
+       ret = intel_pstate_update_status(buf, p ? p - buf : count);
+       mutex_unlock(&intel_pstate_driver_lock);
+
+       return ret < 0 ? ret : count;
+}
+
 static ssize_t show_turbo_pct(struct kobject *kobj,
                                struct attribute *attr, char *buf)
 {
@@ -1055,12 +1105,22 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
        int total, no_turbo, turbo_pct;
        uint32_t turbo_fp;
 
+       mutex_lock(&intel_pstate_driver_lock);
+
+       if (!driver_registered) {
+               mutex_unlock(&intel_pstate_driver_lock);
+               return -EAGAIN;
+       }
+
        cpu = all_cpu_data[0];
 
        total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
        no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
        turbo_fp = div_fp(no_turbo, total);
        turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
+
+       mutex_unlock(&intel_pstate_driver_lock);
+
        return sprintf(buf, "%u\n", turbo_pct);
 }
 
@@ -1070,8 +1130,18 @@ static ssize_t show_num_pstates(struct kobject *kobj,
        struct cpudata *cpu;
        int total;
 
+       mutex_lock(&intel_pstate_driver_lock);
+
+       if (!driver_registered) {
+               mutex_unlock(&intel_pstate_driver_lock);
+               return -EAGAIN;
+       }
+
        cpu = all_cpu_data[0];
        total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
+
+       mutex_unlock(&intel_pstate_driver_lock);
+
        return sprintf(buf, "%u\n", total);
 }
 
@@ -1080,12 +1150,21 @@ static ssize_t show_no_turbo(struct kobject *kobj,
 {
        ssize_t ret;
 
+       mutex_lock(&intel_pstate_driver_lock);
+
+       if (!driver_registered) {
+               mutex_unlock(&intel_pstate_driver_lock);
+               return -EAGAIN;
+       }
+
        update_turbo_state();
        if (limits->turbo_disabled)
                ret = sprintf(buf, "%u\n", limits->turbo_disabled);
        else
                ret = sprintf(buf, "%u\n", limits->no_turbo);
 
+       mutex_unlock(&intel_pstate_driver_lock);
+
        return ret;
 }
 
@@ -1099,12 +1178,20 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
        if (ret != 1)
                return -EINVAL;
 
+       mutex_lock(&intel_pstate_driver_lock);
+
+       if (!driver_registered) {
+               mutex_unlock(&intel_pstate_driver_lock);
+               return -EAGAIN;
+       }
+
        mutex_lock(&intel_pstate_limits_lock);
 
        update_turbo_state();
        if (limits->turbo_disabled) {
                pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
                mutex_unlock(&intel_pstate_limits_lock);
+               mutex_unlock(&intel_pstate_driver_lock);
                return -EPERM;
        }
 
@@ -1114,6 +1201,8 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
 
        intel_pstate_update_policies();
 
+       mutex_unlock(&intel_pstate_driver_lock);
+
        return count;
 }
 
@@ -1127,6 +1216,13 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
        if (ret != 1)
                return -EINVAL;
 
+       mutex_lock(&intel_pstate_driver_lock);
+
+       if (!driver_registered) {
+               mutex_unlock(&intel_pstate_driver_lock);
+               return -EAGAIN;
+       }
+
        mutex_lock(&intel_pstate_limits_lock);
 
        limits->max_sysfs_pct = clamp_t(int, input, 0 , 100);
@@ -1142,6 +1238,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
 
        intel_pstate_update_policies();
 
+       mutex_unlock(&intel_pstate_driver_lock);
+
        return count;
 }
 
@@ -1155,6 +1253,13 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
        if (ret != 1)
                return -EINVAL;
 
+       mutex_lock(&intel_pstate_driver_lock);
+
+       if (!driver_registered) {
+               mutex_unlock(&intel_pstate_driver_lock);
+               return -EAGAIN;
+       }
+
        mutex_lock(&intel_pstate_limits_lock);
 
        limits->min_sysfs_pct = clamp_t(int, input, 0 , 100);
@@ -1170,12 +1275,15 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
 
        intel_pstate_update_policies();
 
+       mutex_unlock(&intel_pstate_driver_lock);
+
        return count;
 }
 
 show_one(max_perf_pct, max_perf_pct);
 show_one(min_perf_pct, min_perf_pct);
 
+define_one_global_rw(status);
 define_one_global_rw(no_turbo);
 define_one_global_rw(max_perf_pct);
 define_one_global_rw(min_perf_pct);
@@ -1183,6 +1291,7 @@ define_one_global_ro(turbo_pct);
 define_one_global_ro(num_pstates);
 
 static struct attribute *intel_pstate_attributes[] = {
+       &status.attr,
        &no_turbo.attr,
        &turbo_pct.attr,
        &num_pstates.attr,
@@ -1235,6 +1344,25 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
                cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
 }
 
+#define MSR_IA32_POWER_CTL_BIT_EE      19
+
+/* Disable energy efficiency optimization */
+static void intel_pstate_disable_ee(int cpu)
+{
+       u64 power_ctl;
+       int ret;
+
+       ret = rdmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, &power_ctl);
+       if (ret)
+               return;
+
+       if (!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE))) {
+               pr_info("Disabling energy efficiency optimization\n");
+               power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
+               wrmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, power_ctl);
+       }
+}
+
 static int atom_get_min_pstate(void)
 {
        u64 value;
@@ -1345,48 +1473,71 @@ static int core_get_max_pstate_physical(void)
        return (value >> 8) & 0xFF;
 }
 
+static int core_get_tdp_ratio(u64 plat_info)
+{
+       /* Check how many TDP levels present */
+       if (plat_info & 0x600000000) {
+               u64 tdp_ctrl;
+               u64 tdp_ratio;
+               int tdp_msr;
+               int err;
+
+               /* Get the TDP level (0, 1, 2) to get ratios */
+               err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
+               if (err)
+                       return err;
+
+               /* TDP MSR are continuous starting at 0x648 */
+               tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03);
+               err = rdmsrl_safe(tdp_msr, &tdp_ratio);
+               if (err)
+                       return err;
+
+               /* For level 1 and 2, bits[23:16] contain the ratio */
+               if (tdp_ctrl & 0x03)
+                       tdp_ratio >>= 16;
+
+               tdp_ratio &= 0xff; /* ratios are only 8 bits long */
+               pr_debug("tdp_ratio %x\n", (int)tdp_ratio);
+
+               return (int)tdp_ratio;
+       }
+
+       return -ENXIO;
+}
+
 static int core_get_max_pstate(void)
 {
        u64 tar;
        u64 plat_info;
        int max_pstate;
+       int tdp_ratio;
        int err;
 
        rdmsrl(MSR_PLATFORM_INFO, plat_info);
        max_pstate = (plat_info >> 8) & 0xFF;
 
+       tdp_ratio = core_get_tdp_ratio(plat_info);
+       if (tdp_ratio <= 0)
+               return max_pstate;
+
+       if (hwp_active) {
+               /* Turbo activation ratio is not used on HWP platforms */
+               return tdp_ratio;
+       }
+
        err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
        if (!err) {
+               int tar_levels;
+
                /* Do some sanity checking for safety */
-               if (plat_info & 0x600000000) {
-                       u64 tdp_ctrl;
-                       u64 tdp_ratio;
-                       int tdp_msr;
-
-                       err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
-                       if (err)
-                               goto skip_tar;
-
-                       tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x3);
-                       err = rdmsrl_safe(tdp_msr, &tdp_ratio);
-                       if (err)
-                               goto skip_tar;
-
-                       /* For level 1 and 2, bits[23:16] contain the ratio */
-                       if (tdp_ctrl)
-                               tdp_ratio >>= 16;
-
-                       tdp_ratio &= 0xff; /* ratios are only 8 bits long */
-                       if (tdp_ratio - 1 == tar) {
-                               max_pstate = tar;
-                               pr_debug("max_pstate=TAC %x\n", max_pstate);
-                       } else {
-                               goto skip_tar;
-                       }
+               tar_levels = tar & 0xff;
+               if (tdp_ratio - 1 == tar_levels) {
+                       max_pstate = tar_levels;
+                       pr_debug("max_pstate=TAC %x\n", max_pstate);
                }
        }
 
-skip_tar:
        return max_pstate;
 }
 
@@ -1845,6 +1996,11 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
        {}
 };
 
+static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
+       ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, core_params),
+       {}
+};
+
 static int intel_pstate_init_cpu(unsigned int cpunum)
 {
        struct cpudata *cpu;
@@ -1875,6 +2031,12 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
        cpu->cpu = cpunum;
 
        if (hwp_active) {
+               const struct x86_cpu_id *id;
+
+               id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids);
+               if (id)
+                       intel_pstate_disable_ee(cpunum);
+
                intel_pstate_hwp_enable(cpu);
                pid_params.sample_rate_ms = 50;
                pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
@@ -2005,7 +2167,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
                        limits = &performance_limits;
                        perf_limits = limits;
                }
-               if (policy->max >= policy->cpuinfo.max_freq) {
+               if (policy->max >= policy->cpuinfo.max_freq &&
+                   !limits->no_turbo) {
                        pr_debug("set performance\n");
                        intel_pstate_set_performance_limits(perf_limits);
                        goto out;
@@ -2041,12 +2204,37 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 
 static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
 {
+       struct cpudata *cpu = all_cpu_data[policy->cpu];
+       struct perf_limits *perf_limits;
+
+       if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
+               perf_limits = &performance_limits;
+       else
+               perf_limits = &powersave_limits;
+
+       update_turbo_state();
+       policy->cpuinfo.max_freq = perf_limits->turbo_disabled ||
+                                       perf_limits->no_turbo ?
+                                       cpu->pstate.max_freq :
+                                       cpu->pstate.turbo_freq;
+
        cpufreq_verify_within_cpu_limits(policy);
 
        if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
            policy->policy != CPUFREQ_POLICY_PERFORMANCE)
                return -EINVAL;
 
+       /* When per-CPU limits are used, sysfs limits are not used */
+       if (!per_cpu_limits) {
+               unsigned int max_freq, min_freq;
+
+               max_freq = policy->cpuinfo.max_freq *
+                                               limits->max_sysfs_pct / 100;
+               min_freq = policy->cpuinfo.max_freq *
+                                               limits->min_sysfs_pct / 100;
+               cpufreq_verify_within_limits(policy, min_freq, max_freq);
+       }
+
        return 0;
 }
 
@@ -2257,6 +2445,111 @@ static struct cpufreq_driver intel_cpufreq = {
 
 static struct cpufreq_driver *intel_pstate_driver = &intel_pstate;
 
+static void intel_pstate_driver_cleanup(void)
+{
+       unsigned int cpu;
+
+       get_online_cpus();
+       for_each_online_cpu(cpu) {
+               if (all_cpu_data[cpu]) {
+                       if (intel_pstate_driver == &intel_pstate)
+                               intel_pstate_clear_update_util_hook(cpu);
+
+                       kfree(all_cpu_data[cpu]);
+                       all_cpu_data[cpu] = NULL;
+               }
+       }
+       put_online_cpus();
+}
+
+static int intel_pstate_register_driver(void)
+{
+       int ret;
+
+       ret = cpufreq_register_driver(intel_pstate_driver);
+       if (ret) {
+               intel_pstate_driver_cleanup();
+               return ret;
+       }
+
+       mutex_lock(&intel_pstate_limits_lock);
+       driver_registered = true;
+       mutex_unlock(&intel_pstate_limits_lock);
+
+       if (intel_pstate_driver == &intel_pstate && !hwp_active &&
+           pstate_funcs.get_target_pstate != get_target_pstate_use_cpu_load)
+               intel_pstate_debug_expose_params();
+
+       return 0;
+}
+
+static int intel_pstate_unregister_driver(void)
+{
+       if (hwp_active)
+               return -EBUSY;
+
+       if (intel_pstate_driver == &intel_pstate && !hwp_active &&
+           pstate_funcs.get_target_pstate != get_target_pstate_use_cpu_load)
+               intel_pstate_debug_hide_params();
+
+       mutex_lock(&intel_pstate_limits_lock);
+       driver_registered = false;
+       mutex_unlock(&intel_pstate_limits_lock);
+
+       cpufreq_unregister_driver(intel_pstate_driver);
+       intel_pstate_driver_cleanup();
+
+       return 0;
+}
+
+static ssize_t intel_pstate_show_status(char *buf)
+{
+       if (!driver_registered)
+               return sprintf(buf, "off\n");
+
+       return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ?
+                                       "active" : "passive");
+}
+
+static int intel_pstate_update_status(const char *buf, size_t size)
+{
+       int ret;
+
+       if (size == 3 && !strncmp(buf, "off", size))
+               return driver_registered ?
+                       intel_pstate_unregister_driver() : -EINVAL;
+
+       if (size == 6 && !strncmp(buf, "active", size)) {
+               if (driver_registered) {
+                       if (intel_pstate_driver == &intel_pstate)
+                               return 0;
+
+                       ret = intel_pstate_unregister_driver();
+                       if (ret)
+                               return ret;
+               }
+
+               intel_pstate_driver = &intel_pstate;
+               return intel_pstate_register_driver();
+       }
+
+       if (size == 7 && !strncmp(buf, "passive", size)) {
+               if (driver_registered) {
+                       if (intel_pstate_driver != &intel_pstate)
+                               return 0;
+
+                       ret = intel_pstate_unregister_driver();
+                       if (ret)
+                               return ret;
+               }
+
+               intel_pstate_driver = &intel_cpufreq;
+               return intel_pstate_register_driver();
+       }
+
+       return -EINVAL;
+}
+
 static int no_load __initdata;
 static int no_hwp __initdata;
 static int hwp_only __initdata;
@@ -2444,9 +2737,9 @@ static const struct x86_cpu_id hwp_support_ids[] __initconst = {
 
 static int __init intel_pstate_init(void)
 {
-       int cpu, rc = 0;
        const struct x86_cpu_id *id;
        struct cpu_defaults *cpu_def;
+       int rc = 0;
 
        if (no_load)
                return -ENODEV;
@@ -2478,45 +2771,29 @@ hwp_cpu_matched:
        if (intel_pstate_platform_pwr_mgmt_exists())
                return -ENODEV;
 
+       if (!hwp_active && hwp_only)
+               return -ENOTSUPP;
+
        pr_info("Intel P-state driver initializing\n");
 
        all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
        if (!all_cpu_data)
                return -ENOMEM;
 
-       if (!hwp_active && hwp_only)
-               goto out;
-
        intel_pstate_request_control_from_smm();
 
-       rc = cpufreq_register_driver(intel_pstate_driver);
-       if (rc)
-               goto out;
-
-       if (intel_pstate_driver == &intel_pstate && !hwp_active &&
-           pstate_funcs.get_target_pstate != get_target_pstate_use_cpu_load)
-               intel_pstate_debug_expose_params();
-
        intel_pstate_sysfs_expose_params();
 
+       mutex_lock(&intel_pstate_driver_lock);
+       rc = intel_pstate_register_driver();
+       mutex_unlock(&intel_pstate_driver_lock);
+       if (rc)
+               return rc;
+
        if (hwp_active)
                pr_info("HWP enabled\n");
 
-       return rc;
-out:
-       get_online_cpus();
-       for_each_online_cpu(cpu) {
-               if (all_cpu_data[cpu]) {
-                       if (intel_pstate_driver == &intel_pstate)
-                               intel_pstate_clear_update_util_hook(cpu);
-
-                       kfree(all_cpu_data[cpu]);
-               }
-       }
-
-       put_online_cpus();
-       vfree(all_cpu_data);
-       return -ENODEV;
+       return 0;
 }
 device_initcall(intel_pstate_init);
 
index 37671b54588030aae885e04c29b5076556223529..3ff5160451b436ec48511a9428ab611b2291b1f9 100644 (file)
@@ -144,6 +144,7 @@ static struct powernv_pstate_info {
        unsigned int max;
        unsigned int nominal;
        unsigned int nr_pstates;
+       bool wof_enabled;
 } powernv_pstate_info;
 
 /* Use following macros for conversions between pstate_id and index */
@@ -203,6 +204,7 @@ static int init_powernv_pstates(void)
        const __be32 *pstate_ids, *pstate_freqs;
        u32 len_ids, len_freqs;
        u32 pstate_min, pstate_max, pstate_nominal;
+       u32 pstate_turbo, pstate_ultra_turbo;
 
        power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
        if (!power_mgt) {
@@ -225,8 +227,29 @@ static int init_powernv_pstates(void)
                pr_warn("ibm,pstate-nominal not found\n");
                return -ENODEV;
        }
+
+       if (of_property_read_u32(power_mgt, "ibm,pstate-ultra-turbo",
+                                &pstate_ultra_turbo)) {
+               powernv_pstate_info.wof_enabled = false;
+               goto next;
+       }
+
+       if (of_property_read_u32(power_mgt, "ibm,pstate-turbo",
+                                &pstate_turbo)) {
+               powernv_pstate_info.wof_enabled = false;
+               goto next;
+       }
+
+       if (pstate_turbo == pstate_ultra_turbo)
+               powernv_pstate_info.wof_enabled = false;
+       else
+               powernv_pstate_info.wof_enabled = true;
+
+next:
        pr_info("cpufreq pstate min %d nominal %d max %d\n", pstate_min,
                pstate_nominal, pstate_max);
+       pr_info("Workload Optimized Frequency is %s in the platform\n",
+               (powernv_pstate_info.wof_enabled) ? "enabled" : "disabled");
 
        pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids);
        if (!pstate_ids) {
@@ -268,6 +291,13 @@ static int init_powernv_pstates(void)
                        powernv_pstate_info.nominal = i;
                else if (id == pstate_min)
                        powernv_pstate_info.min = i;
+
+               if (powernv_pstate_info.wof_enabled && id == pstate_turbo) {
+                       int j;
+
+                       for (j = i - 1; j >= (int)powernv_pstate_info.max; j--)
+                               powernv_freqs[j].flags = CPUFREQ_BOOST_FREQ;
+               }
        }
 
        /* End of list marker entry */
@@ -305,9 +335,12 @@ static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy,
 struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq =
        __ATTR_RO(cpuinfo_nominal_freq);
 
+#define SCALING_BOOST_FREQS_ATTR_INDEX         2
+
 static struct freq_attr *powernv_cpu_freq_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
        &cpufreq_freq_attr_cpuinfo_nominal_freq,
+       &cpufreq_freq_attr_scaling_boost_freqs,
        NULL,
 };
 
@@ -1013,11 +1046,22 @@ static int __init powernv_cpufreq_init(void)
        register_reboot_notifier(&powernv_cpufreq_reboot_nb);
        opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);
 
+       if (powernv_pstate_info.wof_enabled)
+               powernv_cpufreq_driver.boost_enabled = true;
+       else
+               powernv_cpu_freq_attr[SCALING_BOOST_FREQS_ATTR_INDEX] = NULL;
+
        rc = cpufreq_register_driver(&powernv_cpufreq_driver);
-       if (!rc)
-               return 0;
+       if (rc) {
+               pr_info("Failed to register the cpufreq driver (%d)\n", rc);
+               goto cleanup_notifiers;
+       }
 
-       pr_info("Failed to register the cpufreq driver (%d)\n", rc);
+       if (powernv_pstate_info.wof_enabled)
+               cpufreq_enable_boost_support();
+
+       return 0;
+cleanup_notifiers:
        unregister_all_notifiers();
        clean_chip_info();
 out:
index dc112481a40841e67fdc5c9ffbed0effc5d54ce1..eeaa92251512d45bfdfb8b1313d0165818b32aed 100644 (file)
@@ -100,9 +100,6 @@ static int pmi_notifier(struct notifier_block *nb,
        /* Should this really be called for CPUFREQ_ADJUST and CPUFREQ_NOTIFY
         * policy events?)
         */
-       if (event == CPUFREQ_START)
-               return 0;
-
        node = cbe_cpu_to_node(policy->cpu);
 
        pr_debug("got notified, event=%lu, node=%u\n", event, node);
index 53d8c3fb16f67bfc5a4cba5065c32a5503be4328..a6fefac8afe49a38ccc613bc0c015e3e8f5d6cdc 100644 (file)
@@ -11,6 +11,7 @@
 #define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
 
 #include <linux/clk.h>
+#include <linux/clk-provider.h>
 #include <linux/cpufreq.h>
 #include <linux/cpu_cooling.h>
 #include <linux/errno.h>
@@ -37,53 +38,20 @@ struct cpu_data {
        struct thermal_cooling_device *cdev;
 };
 
+/*
+ * Don't use cpufreq on this SoC -- used when the SoC would have otherwise
+ * matched a more generic compatible.
+ */
+#define SOC_BLACKLIST          1
+
 /**
  * struct soc_data - SoC specific data
- * @freq_mask: mask the disallowed frequencies
- * @flag: unique flags
+ * @flags: SOC_xxx
  */
 struct soc_data {
-       u32 freq_mask[4];
-       u32 flag;
-};
-
-#define FREQ_MASK      1
-/* see hardware specification for the allowed frqeuencies */
-static const struct soc_data sdata[] = {
-       { /* used by p2041 and p3041 */
-               .freq_mask = {0x8, 0x8, 0x2, 0x2},
-               .flag = FREQ_MASK,
-       },
-       { /* used by p5020 */
-               .freq_mask = {0x8, 0x2},
-               .flag = FREQ_MASK,
-       },
-       { /* used by p4080, p5040 */
-               .freq_mask = {0},
-               .flag = 0,
-       },
+       u32 flags;
 };
 
-/*
- * the minimum allowed core frequency, in Hz
- * for chassis v1.0, >= platform frequency
- * for chassis v2.0, >= platform frequency / 2
- */
-static u32 min_cpufreq;
-static const u32 *fmask;
-
-#if defined(CONFIG_ARM)
-static int get_cpu_physical_id(int cpu)
-{
-       return topology_core_id(cpu);
-}
-#else
-static int get_cpu_physical_id(int cpu)
-{
-       return get_hard_smp_processor_id(cpu);
-}
-#endif
-
 static u32 get_bus_freq(void)
 {
        struct device_node *soc;
@@ -101,9 +69,10 @@ static u32 get_bus_freq(void)
        return sysfreq;
 }
 
-static struct device_node *cpu_to_clk_node(int cpu)
+static struct clk *cpu_to_clk(int cpu)
 {
-       struct device_node *np, *clk_np;
+       struct device_node *np;
+       struct clk *clk;
 
        if (!cpu_present(cpu))
                return NULL;
@@ -112,37 +81,28 @@ static struct device_node *cpu_to_clk_node(int cpu)
        if (!np)
                return NULL;
 
-       clk_np = of_parse_phandle(np, "clocks", 0);
-       if (!clk_np)
-               return NULL;
-
+       clk = of_clk_get(np, 0);
        of_node_put(np);
-
-       return clk_np;
+       return clk;
 }
 
 /* traverse cpu nodes to get cpu mask of sharing clock wire */
 static void set_affected_cpus(struct cpufreq_policy *policy)
 {
-       struct device_node *np, *clk_np;
        struct cpumask *dstp = policy->cpus;
+       struct clk *clk;
        int i;
 
-       np = cpu_to_clk_node(policy->cpu);
-       if (!np)
-               return;
-
        for_each_present_cpu(i) {
-               clk_np = cpu_to_clk_node(i);
-               if (!clk_np)
+               clk = cpu_to_clk(i);
+               if (IS_ERR(clk)) {
+                       pr_err("%s: no clock for cpu %d\n", __func__, i);
                        continue;
+               }
 
-               if (clk_np == np)
+               if (clk_is_match(policy->clk, clk))
                        cpumask_set_cpu(i, dstp);
-
-               of_node_put(clk_np);
        }
-       of_node_put(np);
 }
 
 /* reduce the duplicated frequencies in frequency table */
@@ -198,10 +158,11 @@ static void freq_table_sort(struct cpufreq_frequency_table *freq_table,
 
 static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
-       struct device_node *np, *pnode;
+       struct device_node *np;
        int i, count, ret;
-       u32 freq, mask;
+       u32 freq;
        struct clk *clk;
+       const struct clk_hw *hwclk;
        struct cpufreq_frequency_table *table;
        struct cpu_data *data;
        unsigned int cpu = policy->cpu;
@@ -221,17 +182,13 @@ static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
                goto err_nomem2;
        }
 
-       pnode = of_parse_phandle(np, "clocks", 0);
-       if (!pnode) {
-               pr_err("%s: could not get clock information\n", __func__);
-               goto err_nomem2;
-       }
+       hwclk = __clk_get_hw(policy->clk);
+       count = clk_hw_get_num_parents(hwclk);
 
-       count = of_property_count_strings(pnode, "clock-names");
        data->pclk = kcalloc(count, sizeof(struct clk *), GFP_KERNEL);
        if (!data->pclk) {
                pr_err("%s: no memory\n", __func__);
-               goto err_node;
+               goto err_nomem2;
        }
 
        table = kcalloc(count + 1, sizeof(*table), GFP_KERNEL);
@@ -240,23 +197,11 @@ static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
                goto err_pclk;
        }
 
-       if (fmask)
-               mask = fmask[get_cpu_physical_id(cpu)];
-       else
-               mask = 0x0;
-
        for (i = 0; i < count; i++) {
-               clk = of_clk_get(pnode, i);
+               clk = clk_hw_get_parent_by_index(hwclk, i)->clk;
                data->pclk[i] = clk;
                freq = clk_get_rate(clk);
-               /*
-                * the clock is valid if its frequency is not masked
-                * and large than minimum allowed frequency.
-                */
-               if (freq < min_cpufreq || (mask & (1 << i)))
-                       table[i].frequency = CPUFREQ_ENTRY_INVALID;
-               else
-                       table[i].frequency = freq / 1000;
+               table[i].frequency = freq / 1000;
                table[i].driver_data = i;
        }
        freq_table_redup(table, count);
@@ -282,7 +227,6 @@ static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
        policy->cpuinfo.transition_latency = u64temp + 1;
 
        of_node_put(np);
-       of_node_put(pnode);
 
        return 0;
 
@@ -290,10 +234,7 @@ err_nomem1:
        kfree(table);
 err_pclk:
        kfree(data->pclk);
-err_node:
-       of_node_put(pnode);
 err_nomem2:
-       policy->driver_data = NULL;
        kfree(data);
 err_np:
        of_node_put(np);
@@ -357,12 +298,25 @@ static struct cpufreq_driver qoriq_cpufreq_driver = {
        .attr           = cpufreq_generic_attr,
 };
 
+static const struct soc_data blacklist = {
+       .flags = SOC_BLACKLIST,
+};
+
 static const struct of_device_id node_matches[] __initconst = {
-       { .compatible = "fsl,p2041-clockgen", .data = &sdata[0], },
-       { .compatible = "fsl,p3041-clockgen", .data = &sdata[0], },
-       { .compatible = "fsl,p5020-clockgen", .data = &sdata[1], },
-       { .compatible = "fsl,p4080-clockgen", .data = &sdata[2], },
-       { .compatible = "fsl,p5040-clockgen", .data = &sdata[2], },
+       /* e6500 cannot use cpufreq due to erratum A-008083 */
+       { .compatible = "fsl,b4420-clockgen", &blacklist },
+       { .compatible = "fsl,b4860-clockgen", &blacklist },
+       { .compatible = "fsl,t2080-clockgen", &blacklist },
+       { .compatible = "fsl,t4240-clockgen", &blacklist },
+
+       { .compatible = "fsl,ls1012a-clockgen", },
+       { .compatible = "fsl,ls1021a-clockgen", },
+       { .compatible = "fsl,ls1043a-clockgen", },
+       { .compatible = "fsl,ls1046a-clockgen", },
+       { .compatible = "fsl,ls1088a-clockgen", },
+       { .compatible = "fsl,ls2080a-clockgen", },
+       { .compatible = "fsl,p4080-clockgen", },
+       { .compatible = "fsl,qoriq-clockgen-1.0", },
        { .compatible = "fsl,qoriq-clockgen-2.0", },
        {}
 };
@@ -380,16 +334,12 @@ static int __init qoriq_cpufreq_init(void)
 
        match = of_match_node(node_matches, np);
        data = match->data;
-       if (data) {
-               if (data->flag)
-                       fmask = data->freq_mask;
-               min_cpufreq = get_bus_freq();
-       } else {
-               min_cpufreq = get_bus_freq() / 2;
-       }
 
        of_node_put(np);
 
+       if (data && data->flags & SOC_BLACKLIST)
+               return -ENODEV;
+
        ret = cpufreq_register_driver(&qoriq_cpufreq_driver);
        if (!ret)
                pr_info("Freescale QorIQ CPU frequency scaling driver\n");
index d6d425773fa497274301eaa88f247fb8dd770e89..5b2db3c6568f691429fce3d636ac955e06b0bec9 100644 (file)
@@ -400,7 +400,6 @@ static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
        rate = clk_get_rate(s3c_freq->hclk);
        if (rate < 133 * 1000 * 1000) {
                pr_err("cpufreq: HCLK not at 133MHz\n");
-               clk_put(s3c_freq->hclk);
                ret = -EINVAL;
                goto err_armclk;
        }
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
new file mode 100644 (file)
index 0000000..a7b5658
--- /dev/null
@@ -0,0 +1,268 @@
+/*
+ * TI CPUFreq/OPP hw-supported driver
+ *
+ * Copyright (C) 2016-2017 Texas Instruments, Inc.
+ *      Dave Gerlach <d-gerlach@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pm_opp.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define REVISION_MASK                          0xF
+#define REVISION_SHIFT                         28
+
+#define AM33XX_800M_ARM_MPU_MAX_FREQ           0x1E2F
+#define AM43XX_600M_ARM_MPU_MAX_FREQ           0xFFA
+
+#define DRA7_EFUSE_HAS_OD_MPU_OPP              11
+#define DRA7_EFUSE_HAS_HIGH_MPU_OPP            15
+#define DRA7_EFUSE_HAS_ALL_MPU_OPP             23
+
+#define DRA7_EFUSE_NOM_MPU_OPP                 BIT(0)
+#define DRA7_EFUSE_OD_MPU_OPP                  BIT(1)
+#define DRA7_EFUSE_HIGH_MPU_OPP                        BIT(2)
+
+#define VERSION_COUNT                          2
+
+struct ti_cpufreq_data;
+
+struct ti_cpufreq_soc_data {
+       unsigned long (*efuse_xlate)(struct ti_cpufreq_data *opp_data,
+                                    unsigned long efuse);
+       unsigned long efuse_fallback;
+       unsigned long efuse_offset;
+       unsigned long efuse_mask;
+       unsigned long efuse_shift;
+       unsigned long rev_offset;
+};
+
+struct ti_cpufreq_data {
+       struct device *cpu_dev;
+       struct device_node *opp_node;
+       struct regmap *syscon;
+       const struct ti_cpufreq_soc_data *soc_data;
+};
+
+static unsigned long amx3_efuse_xlate(struct ti_cpufreq_data *opp_data,
+                                     unsigned long efuse)
+{
+       if (!efuse)
+               efuse = opp_data->soc_data->efuse_fallback;
+       /* AM335x and AM437x use "OPP disable" bits, so invert */
+       return ~efuse;
+}
+
+static unsigned long dra7_efuse_xlate(struct ti_cpufreq_data *opp_data,
+                                     unsigned long efuse)
+{
+       unsigned long calculated_efuse = DRA7_EFUSE_NOM_MPU_OPP;
+
+       /*
+        * The efuse on dra7 and am57 parts contains a specific
+        * value indicating the highest available OPP.
+        */
+
+       switch (efuse) {
+       case DRA7_EFUSE_HAS_ALL_MPU_OPP:
+       case DRA7_EFUSE_HAS_HIGH_MPU_OPP:
+               calculated_efuse |= DRA7_EFUSE_HIGH_MPU_OPP;
+       case DRA7_EFUSE_HAS_OD_MPU_OPP:
+               calculated_efuse |= DRA7_EFUSE_OD_MPU_OPP;
+       }
+
+       return calculated_efuse;
+}
+
+static struct ti_cpufreq_soc_data am3x_soc_data = {
+       .efuse_xlate = amx3_efuse_xlate,
+       .efuse_fallback = AM33XX_800M_ARM_MPU_MAX_FREQ,
+       .efuse_offset = 0x07fc,
+       .efuse_mask = 0x1fff,
+       .rev_offset = 0x600,
+};
+
+static struct ti_cpufreq_soc_data am4x_soc_data = {
+       .efuse_xlate = amx3_efuse_xlate,
+       .efuse_fallback = AM43XX_600M_ARM_MPU_MAX_FREQ,
+       .efuse_offset = 0x0610,
+       .efuse_mask = 0x3f,
+       .rev_offset = 0x600,
+};
+
+static struct ti_cpufreq_soc_data dra7_soc_data = {
+       .efuse_xlate = dra7_efuse_xlate,
+       .efuse_offset = 0x020c,
+       .efuse_mask = 0xf80000,
+       .efuse_shift = 19,
+       .rev_offset = 0x204,
+};
+
+/**
+ * ti_cpufreq_get_efuse() - Parse and return efuse value present on SoC
+ * @opp_data: pointer to ti_cpufreq_data context
+ * @efuse_value: Set to the value parsed from efuse
+ *
+ * Returns error code if efuse not read properly.
+ */
+static int ti_cpufreq_get_efuse(struct ti_cpufreq_data *opp_data,
+                               u32 *efuse_value)
+{
+       struct device *dev = opp_data->cpu_dev;
+       u32 efuse;
+       int ret;
+
+       ret = regmap_read(opp_data->syscon, opp_data->soc_data->efuse_offset,
+                         &efuse);
+       if (ret) {
+               dev_err(dev,
+                       "Failed to read the efuse value from syscon: %d\n",
+                       ret);
+               return ret;
+       }
+
+       efuse = (efuse & opp_data->soc_data->efuse_mask);
+       efuse >>= opp_data->soc_data->efuse_shift;
+
+       *efuse_value = opp_data->soc_data->efuse_xlate(opp_data, efuse);
+
+       return 0;
+}
+
+/**
+ * ti_cpufreq_get_rev() - Parse and return rev value present on SoC
+ * @opp_data: pointer to ti_cpufreq_data context
+ * @revision_value: Set to the value parsed from revision register
+ *
+ * Returns error code if revision not read properly.
+ */
+static int ti_cpufreq_get_rev(struct ti_cpufreq_data *opp_data,
+                             u32 *revision_value)
+{
+       struct device *dev = opp_data->cpu_dev;
+       u32 revision;
+       int ret;
+
+       ret = regmap_read(opp_data->syscon, opp_data->soc_data->rev_offset,
+                         &revision);
+       if (ret) {
+               dev_err(dev,
+                       "Failed to read the revision number from syscon: %d\n",
+                       ret);
+               return ret;
+       }
+
+       *revision_value = BIT((revision >> REVISION_SHIFT) & REVISION_MASK);
+
+       return 0;
+}
+
+static int ti_cpufreq_setup_syscon_register(struct ti_cpufreq_data *opp_data)
+{
+       struct device *dev = opp_data->cpu_dev;
+       struct device_node *np = opp_data->opp_node;
+
+       opp_data->syscon = syscon_regmap_lookup_by_phandle(np,
+                                                       "syscon");
+       if (IS_ERR(opp_data->syscon)) {
+               dev_err(dev,
+                       "\"syscon\" is missing, cannot use OPPv2 table.\n");
+               return PTR_ERR(opp_data->syscon);
+       }
+
+       return 0;
+}
+
+static const struct of_device_id ti_cpufreq_of_match[] = {
+       { .compatible = "ti,am33xx", .data = &am3x_soc_data, },
+       { .compatible = "ti,am4372", .data = &am4x_soc_data, },
+       { .compatible = "ti,dra7", .data = &dra7_soc_data },
+       {},
+};
+
+static int ti_cpufreq_init(void)
+{
+       u32 version[VERSION_COUNT];
+       struct device_node *np;
+       const struct of_device_id *match;
+       struct ti_cpufreq_data *opp_data;
+       int ret;
+
+       np = of_find_node_by_path("/");
+       match = of_match_node(ti_cpufreq_of_match, np);
+       if (!match)
+               return -ENODEV;
+
+       opp_data = kzalloc(sizeof(*opp_data), GFP_KERNEL);
+       if (!opp_data)
+               return -ENOMEM;
+
+       opp_data->soc_data = match->data;
+
+       opp_data->cpu_dev = get_cpu_device(0);
+       if (!opp_data->cpu_dev) {
+               pr_err("%s: Failed to get device for CPU0\n", __func__);
+               return -ENODEV;
+       }
+
+       opp_data->opp_node = dev_pm_opp_of_get_opp_desc_node(opp_data->cpu_dev);
+       if (!opp_data->opp_node) {
+               dev_info(opp_data->cpu_dev,
+                        "OPP-v2 not supported, cpufreq-dt will attempt to use legacy tables.\n");
+               goto register_cpufreq_dt;
+       }
+
+       ret = ti_cpufreq_setup_syscon_register(opp_data);
+       if (ret)
+               goto fail_put_node;
+
+       /*
+        * OPPs determine whether or not they are supported based on
+        * two metrics:
+        *      0 - SoC Revision
+        *      1 - eFuse value
+        */
+       ret = ti_cpufreq_get_rev(opp_data, &version[0]);
+       if (ret)
+               goto fail_put_node;
+
+       ret = ti_cpufreq_get_efuse(opp_data, &version[1]);
+       if (ret)
+               goto fail_put_node;
+
+       of_node_put(opp_data->opp_node);
+
+       ret = PTR_ERR_OR_ZERO(dev_pm_opp_set_supported_hw(opp_data->cpu_dev,
+                                                         version, VERSION_COUNT));
+       if (ret) {
+               dev_err(opp_data->cpu_dev,
+                       "Failed to set supported hardware\n");
+               goto fail_put_node;
+       }
+
+register_cpufreq_dt:
+       platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+
+       return 0;
+
+fail_put_node:
+       of_node_put(opp_data->opp_node);
+
+       return ret;
+}
+device_initcall(ti_cpufreq_init);
index d9b5b9398a0f67938c7173fa8d4d757c0d67a3ff..8d6d25c38c020e1bcc6f6f0e68d8aab97f96bc3d 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/tick.h>
 #include <linux/sched.h>
 #include <linux/math64.h>
+#include <linux/cpu.h>
 
 /*
  * Please note when changing the tuning values:
@@ -280,17 +281,23 @@ again:
 static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
        struct menu_device *data = this_cpu_ptr(&menu_devices);
+       struct device *device = get_cpu_device(dev->cpu);
        int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
        int i;
        unsigned int interactivity_req;
        unsigned int expected_interval;
        unsigned long nr_iowaiters, cpu_load;
+       int resume_latency = dev_pm_qos_read_value(device);
 
        if (data->needs_update) {
                menu_update(drv, dev);
                data->needs_update = 0;
        }
 
+       /* resume_latency is 0 means no restriction */
+       if (resume_latency && resume_latency < latency_req)
+               latency_req = resume_latency;
+
        /* Special case when user has set very strict latency requirement */
        if (unlikely(latency_req == 0))
                return 0;
@@ -357,9 +364,9 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
                if (s->disabled || su->disable)
                        continue;
                if (s->target_residency > data->predicted_us)
-                       continue;
+                       break;
                if (s->exit_latency > latency_req)
-                       continue;
+                       break;
 
                data->last_state_idx = i;
        }
index e2ce8190ecc9a5c0baf3f6e642a4cff17f89862c..612898b4aaad045f7e96429ac325ecbec91a4295 100644 (file)
@@ -959,7 +959,7 @@ static irqreturn_t ccp5_irq_handler(int irq, void *data)
 static void ccp5_config(struct ccp_device *ccp)
 {
        /* Public side */
-       iowrite32(0x00001249, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
+       iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
 }
 
 static void ccp5other_config(struct ccp_device *ccp)
index 830f35e6005f0e002457d57fab70a8557fb85e24..649e5610a5cea7d08a95767af1c8f8e0153e0b7a 100644 (file)
@@ -238,6 +238,7 @@ struct ccp_dma_chan {
        struct ccp_device *ccp;
 
        spinlock_t lock;
+       struct list_head created;
        struct list_head pending;
        struct list_head active;
        struct list_head complete;
index 6553912804f73f1c061aec9bdd3afd0f0d7426bc..e5d9278f40197427e913993fe9249d405585fe87 100644 (file)
@@ -63,6 +63,7 @@ static void ccp_free_chan_resources(struct dma_chan *dma_chan)
        ccp_free_desc_resources(chan->ccp, &chan->complete);
        ccp_free_desc_resources(chan->ccp, &chan->active);
        ccp_free_desc_resources(chan->ccp, &chan->pending);
+       ccp_free_desc_resources(chan->ccp, &chan->created);
 
        spin_unlock_irqrestore(&chan->lock, flags);
 }
@@ -273,6 +274,7 @@ static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
        spin_lock_irqsave(&chan->lock, flags);
 
        cookie = dma_cookie_assign(tx_desc);
+       list_del(&desc->entry);
        list_add_tail(&desc->entry, &chan->pending);
 
        spin_unlock_irqrestore(&chan->lock, flags);
@@ -426,7 +428,7 @@ static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
 
        spin_lock_irqsave(&chan->lock, sflags);
 
-       list_add_tail(&desc->entry, &chan->pending);
+       list_add_tail(&desc->entry, &chan->created);
 
        spin_unlock_irqrestore(&chan->lock, sflags);
 
@@ -610,6 +612,7 @@ static int ccp_terminate_all(struct dma_chan *dma_chan)
        /*TODO: Purge the complete list? */
        ccp_free_desc_resources(chan->ccp, &chan->active);
        ccp_free_desc_resources(chan->ccp, &chan->pending);
+       ccp_free_desc_resources(chan->ccp, &chan->created);
 
        spin_unlock_irqrestore(&chan->lock, flags);
 
@@ -679,6 +682,7 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
                chan->ccp = ccp;
 
                spin_lock_init(&chan->lock);
+               INIT_LIST_HEAD(&chan->created);
                INIT_LIST_HEAD(&chan->pending);
                INIT_LIST_HEAD(&chan->active);
                INIT_LIST_HEAD(&chan->complete);
index 2ed1e24b44a8ba07b24a1ef4a9d7cca4e57522d4..b4b78b37f8a684698d79bc053be0b2f9db7e654f 100644 (file)
@@ -158,7 +158,7 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
        case CRYPTO_ALG_TYPE_AEAD:
                ctx_req.req.aead_req = (struct aead_request *)req;
                ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
-               dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.aead_req->dst,
+               dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst,
                             ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
                if (ctx_req.ctx.reqctx->skb) {
                        kfree_skb(ctx_req.ctx.reqctx->skb);
@@ -1362,8 +1362,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
        struct chcr_wr *chcr_req;
        struct cpl_rx_phys_dsgl *phys_cpl;
        struct phys_sge_parm sg_param;
-       struct scatterlist *src, *dst;
-       struct scatterlist src_sg[2], dst_sg[2];
+       struct scatterlist *src;
        unsigned int frags = 0, transhdr_len;
        unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
        unsigned int   kctx_len = 0;
@@ -1383,19 +1382,21 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 
        if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
                goto err;
-       src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
-       dst = src;
+       src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+       reqctx->dst = src;
+
        if (req->src != req->dst) {
                err = chcr_copy_assoc(req, aeadctx);
                if (err)
                        return ERR_PTR(err);
-               dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+               reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
+                                              req->assoclen);
        }
        if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
                null = 1;
                assoclen = 0;
        }
-       reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+       reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
                                             (op_type ? -authsize : authsize));
        if (reqctx->dst_nents <= 0) {
                pr_err("AUTHENC:Invalid Destination sg entries\n");
@@ -1460,7 +1461,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
        sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
        sg_param.qid = qid;
        sg_param.align = 0;
-       if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+       if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
                                  &sg_param))
                goto dstmap_fail;
 
@@ -1711,8 +1712,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
        struct chcr_wr *chcr_req;
        struct cpl_rx_phys_dsgl *phys_cpl;
        struct phys_sge_parm sg_param;
-       struct scatterlist *src, *dst;
-       struct scatterlist src_sg[2], dst_sg[2];
+       struct scatterlist *src;
        unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
        unsigned int dst_size = 0, kctx_len;
        unsigned int sub_type;
@@ -1728,17 +1728,19 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
        if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
                goto err;
        sub_type = get_aead_subtype(tfm);
-       src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
-       dst = src;
+       src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+       reqctx->dst = src;
+
        if (req->src != req->dst) {
                err = chcr_copy_assoc(req, aeadctx);
                if (err) {
                        pr_err("AAD copy to destination buffer fails\n");
                        return ERR_PTR(err);
                }
-               dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+               reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
+                                              req->assoclen);
        }
-       reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+       reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
                                             (op_type ? -authsize : authsize));
        if (reqctx->dst_nents <= 0) {
                pr_err("CCM:Invalid Destination sg entries\n");
@@ -1777,7 +1779,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
        sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
        sg_param.qid = qid;
        sg_param.align = 0;
-       if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+       if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
                                  &sg_param))
                goto dstmap_fail;
 
@@ -1809,8 +1811,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
        struct chcr_wr *chcr_req;
        struct cpl_rx_phys_dsgl *phys_cpl;
        struct phys_sge_parm sg_param;
-       struct scatterlist *src, *dst;
-       struct scatterlist src_sg[2], dst_sg[2];
+       struct scatterlist *src;
        unsigned int frags = 0, transhdr_len;
        unsigned int ivsize = AES_BLOCK_SIZE;
        unsigned int dst_size = 0, kctx_len;
@@ -1832,13 +1833,14 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
        if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
                goto err;
 
-       src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
-       dst = src;
+       src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+       reqctx->dst = src;
        if (req->src != req->dst) {
                err = chcr_copy_assoc(req, aeadctx);
                if (err)
                        return  ERR_PTR(err);
-               dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+               reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
+                                              req->assoclen);
        }
 
        if (!req->cryptlen)
@@ -1848,7 +1850,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
                crypt_len = AES_BLOCK_SIZE;
        else
                crypt_len = req->cryptlen;
-       reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+       reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
                                             (op_type ? -authsize : authsize));
        if (reqctx->dst_nents <= 0) {
                pr_err("GCM:Invalid Destination sg entries\n");
@@ -1923,7 +1925,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
        sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
        sg_param.qid = qid;
        sg_param.align = 0;
-       if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+       if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
                                  &sg_param))
                goto dstmap_fail;
 
@@ -1937,7 +1939,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
                write_sg_to_skb(skb, &frags, src, req->cryptlen);
        } else {
                aes_gcm_empty_pld_pad(req->dst, authsize - 1);
-               write_sg_to_skb(skb, &frags, dst, crypt_len);
+               write_sg_to_skb(skb, &frags, reqctx->dst, crypt_len);
+
        }
 
        create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
@@ -2189,8 +2192,8 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
        unsigned int ck_size;
        int ret = 0, key_ctx_size = 0;
 
-       if (get_aead_subtype(aead) ==
-           CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
+       if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
+           keylen > 3) {
                keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
                memcpy(aeadctx->salt, key + keylen, 4);
        }
index 918da8e6e2d8a587ef7e397e149a9779a8c4a21f..1c65f07e1cc9a56f9dc88b30c10ba57e13b875ed 100644 (file)
@@ -52,6 +52,7 @@ static struct cxgb4_uld_info chcr_uld_info = {
 int assign_chcr_device(struct chcr_dev **dev)
 {
        struct uld_ctx *u_ctx;
+       int ret = -ENXIO;
 
        /*
         * Which device to use if multiple devices are available TODO
@@ -59,15 +60,14 @@ int assign_chcr_device(struct chcr_dev **dev)
         * must go to the same device to maintain the ordering.
         */
        mutex_lock(&dev_mutex); /* TODO ? */
-       u_ctx = list_first_entry(&uld_ctx_list, struct uld_ctx, entry);
-       if (!u_ctx) {
-               mutex_unlock(&dev_mutex);
-               return -ENXIO;
+       list_for_each_entry(u_ctx, &uld_ctx_list, entry)
+               if (u_ctx && u_ctx->dev) {
+                       *dev = u_ctx->dev;
+                       ret = 0;
+                       break;
        }
-
-       *dev = u_ctx->dev;
        mutex_unlock(&dev_mutex);
-       return 0;
+       return ret;
 }
 
 static int chcr_dev_add(struct uld_ctx *u_ctx)
@@ -202,10 +202,8 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
 
 static int __init chcr_crypto_init(void)
 {
-       if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info)) {
+       if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info))
                pr_err("ULD register fail: No chcr crypto support in cxgb4");
-               return -1;
-       }
 
        return 0;
 }
index d5af7d64a763bc8ad4ce45d60bada6226059bff1..7ec0a8f124753d1fbcdf7b248d14177330bab977 100644 (file)
@@ -158,6 +158,9 @@ struct ablk_ctx {
 };
 struct chcr_aead_reqctx {
        struct  sk_buff *skb;
+       struct scatterlist *dst;
+       struct scatterlist srcffwd[2];
+       struct scatterlist dstffwd[2];
        short int dst_nents;
        u16 verify;
        u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
index bc5cbc193aae5c6167559ea3d2b9975acc7e6a5a..5b2d78a5b5aaaffd9ba3a858269c8664cd4f89b7 100644 (file)
@@ -233,7 +233,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                              &hw_data->accel_capabilities_mask);
 
        /* Find and map all the device's BARS */
-       i = 0;
+       i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
        bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
        for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
                         ADF_PCI_MAX_BARS * 2) {
index e8822536530b99666f2f5137c25b36f7b451fe06..33f0a6251e385c8f5f72fc0f384106c368329c86 100644 (file)
@@ -69,6 +69,7 @@
 #define ADF_ERRSOU5 (0x3A000 + 0xD8)
 #define ADF_DEVICE_FUSECTL_OFFSET 0x40
 #define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
+#define ADF_DEVICE_FUSECTL_MASK 0x80000000
 #define ADF_PCI_MAX_BARS 3
 #define ADF_DEVICE_NAME_LENGTH 32
 #define ADF_ETR_MAX_RINGS_PER_BANK 16
index 1e480f140663530a699d8bf51b402bce394a935e..8c4fd255a601b2d6c60e2300f5c878f8a512f57d 100644 (file)
@@ -456,7 +456,7 @@ static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
        unsigned int csr_val;
        int times = 30;
 
-       if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID)
+       if (handle->pci_dev->device != ADF_DH895XCC_PCI_DEVICE_ID)
                return 0;
 
        csr_val = ADF_CSR_RD(csr_addr, 0);
@@ -716,7 +716,7 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
                (void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v +
                                 LOCAL_TO_XFER_REG_OFFSET);
        handle->pci_dev = pci_info->pci_dev;
-       if (handle->pci_dev->device != ADF_C3XXX_PCI_DEVICE_ID) {
+       if (handle->pci_dev->device == ADF_DH895XCC_PCI_DEVICE_ID) {
                sram_bar =
                        &pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
                handle->hal_sram_addr_v = sram_bar->virt_addr;
index 9aea2c7ecbe6ec3cba2802502ca54181df0f3935..8648b32ebc8906c123561eaca53af6e48f655a1a 100644 (file)
@@ -306,7 +306,7 @@ struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
                                                struct devfreq_event_desc *desc)
 {
        struct devfreq_event_dev *edev;
-       static atomic_t event_no = ATOMIC_INIT(0);
+       static atomic_t event_no = ATOMIC_INIT(-1);
        int ret;
 
        if (!dev || !desc)
@@ -329,7 +329,7 @@ struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
        edev->dev.class = devfreq_event_class;
        edev->dev.release = devfreq_event_release_edev;
 
-       dev_set_name(&edev->dev, "event.%d", atomic_inc_return(&event_no) - 1);
+       dev_set_name(&edev->dev, "event%d", atomic_inc_return(&event_no));
        ret = device_register(&edev->dev);
        if (ret < 0) {
                put_device(&edev->dev);
index 253525ea17afabef1b61c36d06969aa82a3dd7e9..551a271353d2a49f98221a03af5c235405d50fe5 100644 (file)
@@ -128,7 +128,7 @@ static void devfreq_set_freq_table(struct devfreq *devfreq)
  * @devfreq:   the devfreq instance
  * @freq:      the update target frequency
  */
-static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
+int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
 {
        int lev, prev_lev, ret = 0;
        unsigned long cur_time;
@@ -164,6 +164,7 @@ out:
        devfreq->last_stat_updated = cur_time;
        return ret;
 }
+EXPORT_SYMBOL(devfreq_update_status);
 
 /**
  * find_devfreq_governor() - find devfreq governor from name
@@ -472,11 +473,15 @@ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
 }
 
 /**
- * _remove_devfreq() - Remove devfreq from the list and release its resources.
- * @devfreq:   the devfreq struct
+ * devfreq_dev_release() - Callback for struct device to release the device.
+ * @dev:       the devfreq device
+ *
+ * Remove devfreq from the list and release its resources.
  */
-static void _remove_devfreq(struct devfreq *devfreq)
+static void devfreq_dev_release(struct device *dev)
 {
+       struct devfreq *devfreq = to_devfreq(dev);
+
        mutex_lock(&devfreq_list_lock);
        if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
                mutex_unlock(&devfreq_list_lock);
@@ -497,19 +502,6 @@ static void _remove_devfreq(struct devfreq *devfreq)
        kfree(devfreq);
 }
 
-/**
- * devfreq_dev_release() - Callback for struct device to release the device.
- * @dev:       the devfreq device
- *
- * This calls _remove_devfreq() if _remove_devfreq() is not called.
- */
-static void devfreq_dev_release(struct device *dev)
-{
-       struct devfreq *devfreq = to_devfreq(dev);
-
-       _remove_devfreq(devfreq);
-}
-
 /**
  * devfreq_add_device() - Add devfreq feature to the device
  * @dev:       the device to add devfreq feature.
@@ -525,6 +517,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
 {
        struct devfreq *devfreq;
        struct devfreq_governor *governor;
+       static atomic_t devfreq_no = ATOMIC_INIT(-1);
        int err = 0;
 
        if (!dev || !profile || !governor_name) {
@@ -536,15 +529,14 @@ struct devfreq *devfreq_add_device(struct device *dev,
        devfreq = find_device_devfreq(dev);
        mutex_unlock(&devfreq_list_lock);
        if (!IS_ERR(devfreq)) {
-               dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
+               dev_err(dev, "%s: Unable to create devfreq for the device.\n",
+                       __func__);
                err = -EINVAL;
                goto err_out;
        }
 
        devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
        if (!devfreq) {
-               dev_err(dev, "%s: Unable to create devfreq for the device\n",
-                       __func__);
                err = -ENOMEM;
                goto err_out;
        }
@@ -567,18 +559,21 @@ struct devfreq *devfreq_add_device(struct device *dev,
                mutex_lock(&devfreq->lock);
        }
 
-       dev_set_name(&devfreq->dev, "%s", dev_name(dev));
+       dev_set_name(&devfreq->dev, "devfreq%d",
+                               atomic_inc_return(&devfreq_no));
        err = device_register(&devfreq->dev);
        if (err) {
                mutex_unlock(&devfreq->lock);
                goto err_out;
        }
 
-       devfreq->trans_table =  devm_kzalloc(&devfreq->dev, sizeof(unsigned int) *
+       devfreq->trans_table =  devm_kzalloc(&devfreq->dev,
+                                               sizeof(unsigned int) *
                                                devfreq->profile->max_state *
                                                devfreq->profile->max_state,
                                                GFP_KERNEL);
-       devfreq->time_in_state = devm_kzalloc(&devfreq->dev, sizeof(unsigned long) *
+       devfreq->time_in_state = devm_kzalloc(&devfreq->dev,
+                                               sizeof(unsigned long) *
                                                devfreq->profile->max_state,
                                                GFP_KERNEL);
        devfreq->last_stat_updated = jiffies;
@@ -937,6 +932,9 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
        if (df->governor == governor) {
                ret = 0;
                goto out;
+       } else if (df->governor->immutable || governor->immutable) {
+               ret = -EINVAL;
+               goto out;
        }
 
        if (df->governor) {
@@ -966,13 +964,33 @@ static ssize_t available_governors_show(struct device *d,
                                        struct device_attribute *attr,
                                        char *buf)
 {
-       struct devfreq_governor *tmp_governor;
+       struct devfreq *df = to_devfreq(d);
        ssize_t count = 0;
 
        mutex_lock(&devfreq_list_lock);
-       list_for_each_entry(tmp_governor, &devfreq_governor_list, node)
-               count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
-                                  "%s ", tmp_governor->name);
+
+       /*
+        * The devfreq with immutable governor (e.g., passive) shows
+        * only own governor.
+        */
+       if (df->governor->immutable) {
+               count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
+                                  "%s ", df->governor_name);
+       /*
+        * The devfreq device shows the registered governor except for
+        * immutable governors such as passive governor .
+        */
+       } else {
+               struct devfreq_governor *governor;
+
+               list_for_each_entry(governor, &devfreq_governor_list, node) {
+                       if (governor->immutable)
+                               continue;
+                       count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
+                                          "%s ", governor->name);
+               }
+       }
+
        mutex_unlock(&devfreq_list_lock);
 
        /* Truncate the trailing space */
@@ -993,7 +1011,7 @@ static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr,
 
        if (devfreq->profile->get_cur_freq &&
                !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
-                       return sprintf(buf, "%lu\n", freq);
+               return sprintf(buf, "%lu\n", freq);
 
        return sprintf(buf, "%lu\n", devfreq->previous_freq);
 }
index 107eb91a9415d26eb429bee2c47dc39ac132e648..9b7350935b73259828a897a7eea6d4fc9b2e4c4f 100644 (file)
 #include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/platform_device.h>
+#include <linux/regmap.h>
 #include <linux/suspend.h>
 #include <linux/devfreq-event.h>
 
 #include "exynos-ppmu.h"
 
 struct exynos_ppmu_data {
-       void __iomem *base;
        struct clk *clk;
 };
 
@@ -33,6 +33,7 @@ struct exynos_ppmu {
        unsigned int num_events;
 
        struct device *dev;
+       struct regmap *regmap;
 
        struct exynos_ppmu_data ppmu;
 };
@@ -107,20 +108,28 @@ static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
 static int exynos_ppmu_disable(struct devfreq_event_dev *edev)
 {
        struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
+       int ret;
        u32 pmnc;
 
        /* Disable all counters */
-       __raw_writel(PPMU_CCNT_MASK |
-                    PPMU_PMCNT0_MASK |
-                    PPMU_PMCNT1_MASK |
-                    PPMU_PMCNT2_MASK |
-                    PPMU_PMCNT3_MASK,
-                    info->ppmu.base + PPMU_CNTENC);
+       ret = regmap_write(info->regmap, PPMU_CNTENC,
+                               PPMU_CCNT_MASK |
+                               PPMU_PMCNT0_MASK |
+                               PPMU_PMCNT1_MASK |
+                               PPMU_PMCNT2_MASK |
+                               PPMU_PMCNT3_MASK);
+       if (ret < 0)
+               return ret;
 
        /* Disable PPMU */
-       pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
+       ret = regmap_read(info->regmap, PPMU_PMNC, &pmnc);
+       if (ret < 0)
+               return ret;
+
        pmnc &= ~PPMU_PMNC_ENABLE_MASK;
-       __raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);
+       ret = regmap_write(info->regmap, PPMU_PMNC, pmnc);
+       if (ret < 0)
+               return ret;
 
        return 0;
 }
@@ -129,29 +138,42 @@ static int exynos_ppmu_set_event(struct devfreq_event_dev *edev)
 {
        struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
        int id = exynos_ppmu_find_ppmu_id(edev);
+       int ret;
        u32 pmnc, cntens;
 
        if (id < 0)
                return id;
 
        /* Enable specific counter */
-       cntens = __raw_readl(info->ppmu.base + PPMU_CNTENS);
+       ret = regmap_read(info->regmap, PPMU_CNTENS, &cntens);
+       if (ret < 0)
+               return ret;
+
        cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
-       __raw_writel(cntens, info->ppmu.base + PPMU_CNTENS);
+       ret = regmap_write(info->regmap, PPMU_CNTENS, cntens);
+       if (ret < 0)
+               return ret;
 
        /* Set the event of Read/Write data count  */
-       __raw_writel(PPMU_RO_DATA_CNT | PPMU_WO_DATA_CNT,
-                       info->ppmu.base + PPMU_BEVTxSEL(id));
+       ret = regmap_write(info->regmap, PPMU_BEVTxSEL(id),
+                               PPMU_RO_DATA_CNT | PPMU_WO_DATA_CNT);
+       if (ret < 0)
+               return ret;
 
        /* Reset cycle counter/performance counter and enable PPMU */
-       pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
+       ret = regmap_read(info->regmap, PPMU_PMNC, &pmnc);
+       if (ret < 0)
+               return ret;
+
        pmnc &= ~(PPMU_PMNC_ENABLE_MASK
                        | PPMU_PMNC_COUNTER_RESET_MASK
                        | PPMU_PMNC_CC_RESET_MASK);
        pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
        pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
        pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
-       __raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);
+       ret = regmap_write(info->regmap, PPMU_PMNC, pmnc);
+       if (ret < 0)
+               return ret;
 
        return 0;
 }
@@ -161,40 +183,64 @@ static int exynos_ppmu_get_event(struct devfreq_event_dev *edev,
 {
        struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
        int id = exynos_ppmu_find_ppmu_id(edev);
-       u32 pmnc, cntenc;
+       unsigned int total_count, load_count;
+       unsigned int pmcnt3_high, pmcnt3_low;
+       unsigned int pmnc, cntenc;
+       int ret;
 
        if (id < 0)
                return -EINVAL;
 
        /* Disable PPMU */
-       pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
+       ret = regmap_read(info->regmap, PPMU_PMNC, &pmnc);
+       if (ret < 0)
+               return ret;
+
        pmnc &= ~PPMU_PMNC_ENABLE_MASK;
-       __raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);
+       ret = regmap_write(info->regmap, PPMU_PMNC, pmnc);
+       if (ret < 0)
+               return ret;
 
        /* Read cycle count */
-       edata->total_count = __raw_readl(info->ppmu.base + PPMU_CCNT);
+       ret = regmap_read(info->regmap, PPMU_CCNT, &total_count);
+       if (ret < 0)
+               return ret;
+       edata->total_count = total_count;
 
        /* Read performance count */
        switch (id) {
        case PPMU_PMNCNT0:
        case PPMU_PMNCNT1:
        case PPMU_PMNCNT2:
-               edata->load_count
-                       = __raw_readl(info->ppmu.base + PPMU_PMNCT(id));
+               ret = regmap_read(info->regmap, PPMU_PMNCT(id), &load_count);
+               if (ret < 0)
+                       return ret;
+               edata->load_count = load_count;
                break;
        case PPMU_PMNCNT3:
-               edata->load_count =
-                       ((__raw_readl(info->ppmu.base + PPMU_PMCNT3_HIGH) << 8)
-                       | __raw_readl(info->ppmu.base + PPMU_PMCNT3_LOW));
+               ret = regmap_read(info->regmap, PPMU_PMCNT3_HIGH, &pmcnt3_high);
+               if (ret < 0)
+                       return ret;
+
+               ret = regmap_read(info->regmap, PPMU_PMCNT3_LOW, &pmcnt3_low);
+               if (ret < 0)
+                       return ret;
+
+               edata->load_count = ((pmcnt3_high << 8) | pmcnt3_low);
                break;
        default:
                return -EINVAL;
        }
 
        /* Disable specific counter */
-       cntenc = __raw_readl(info->ppmu.base + PPMU_CNTENC);
+       ret = regmap_read(info->regmap, PPMU_CNTENC, &cntenc);
+       if (ret < 0)
+               return ret;
+
        cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
-       __raw_writel(cntenc, info->ppmu.base + PPMU_CNTENC);
+       ret = regmap_write(info->regmap, PPMU_CNTENC, cntenc);
+       if (ret < 0)
+               return ret;
 
        dev_dbg(&edev->dev, "%s (event: %ld/%ld)\n", edev->desc->name,
                                        edata->load_count, edata->total_count);
@@ -214,36 +260,93 @@ static const struct devfreq_event_ops exynos_ppmu_ops = {
 static int exynos_ppmu_v2_disable(struct devfreq_event_dev *edev)
 {
        struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
+       int ret;
        u32 pmnc, clear;
 
        /* Disable all counters */
        clear = (PPMU_CCNT_MASK | PPMU_PMCNT0_MASK | PPMU_PMCNT1_MASK
                | PPMU_PMCNT2_MASK | PPMU_PMCNT3_MASK);
+       ret = regmap_write(info->regmap, PPMU_V2_FLAG, clear);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_write(info->regmap, PPMU_V2_INTENC, clear);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_write(info->regmap, PPMU_V2_CNTENC, clear);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_write(info->regmap, PPMU_V2_CNT_RESET, clear);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_write(info->regmap, PPMU_V2_CIG_CFG0, 0x0);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_write(info->regmap, PPMU_V2_CIG_CFG1, 0x0);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_write(info->regmap, PPMU_V2_CIG_CFG2, 0x0);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_write(info->regmap, PPMU_V2_CIG_RESULT, 0x0);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_write(info->regmap, PPMU_V2_CNT_AUTO, 0x0);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_write(info->regmap, PPMU_V2_CH_EV0_TYPE, 0x0);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_write(info->regmap, PPMU_V2_CH_EV1_TYPE, 0x0);
+       if (ret < 0)
+               return ret;
 
-       __raw_writel(clear, info->ppmu.base + PPMU_V2_FLAG);
-       __raw_writel(clear, info->ppmu.base + PPMU_V2_INTENC);
-       __raw_writel(clear, info->ppmu.base + PPMU_V2_CNTENC);
-       __raw_writel(clear, info->ppmu.base + PPMU_V2_CNT_RESET);
-
-       __raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG0);
-       __raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG1);
-       __raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG2);
-       __raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_RESULT);
-       __raw_writel(0x0, info->ppmu.base + PPMU_V2_CNT_AUTO);
-       __raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV0_TYPE);
-       __raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV1_TYPE);
-       __raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV2_TYPE);
-       __raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV3_TYPE);
-       __raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_ID_V);
-       __raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_ID_A);
-       __raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_OTHERS_V);
-       __raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_OTHERS_A);
-       __raw_writel(0x0, info->ppmu.base + PPMU_V2_INTERRUPT_RESET);
+       ret = regmap_write(info->regmap, PPMU_V2_CH_EV2_TYPE, 0x0);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_write(info->regmap, PPMU_V2_CH_EV3_TYPE, 0x0);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_write(info->regmap, PPMU_V2_SM_ID_V, 0x0);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_write(info->regmap, PPMU_V2_SM_ID_A, 0x0);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_write(info->regmap, PPMU_V2_SM_OTHERS_V, 0x0);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_write(info->regmap, PPMU_V2_SM_OTHERS_A, 0x0);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_write(info->regmap, PPMU_V2_INTERRUPT_RESET, 0x0);
+       if (ret < 0)
+               return ret;
 
        /* Disable PPMU */
-       pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
+       ret = regmap_read(info->regmap, PPMU_V2_PMNC, &pmnc);
+       if (ret < 0)
+               return ret;
+
        pmnc &= ~PPMU_PMNC_ENABLE_MASK;
-       __raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);
+       ret = regmap_write(info->regmap, PPMU_V2_PMNC, pmnc);
+       if (ret < 0)
+               return ret;
 
        return 0;
 }
@@ -251,30 +354,43 @@ static int exynos_ppmu_v2_disable(struct devfreq_event_dev *edev)
 static int exynos_ppmu_v2_set_event(struct devfreq_event_dev *edev)
 {
        struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
+       unsigned int pmnc, cntens;
        int id = exynos_ppmu_find_ppmu_id(edev);
-       u32 pmnc, cntens;
+       int ret;
 
        /* Enable all counters */
-       cntens = __raw_readl(info->ppmu.base + PPMU_V2_CNTENS);
+       ret = regmap_read(info->regmap, PPMU_V2_CNTENS, &cntens);
+       if (ret < 0)
+               return ret;
+
        cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
-       __raw_writel(cntens, info->ppmu.base + PPMU_V2_CNTENS);
+       ret = regmap_write(info->regmap, PPMU_V2_CNTENS, cntens);
+       if (ret < 0)
+               return ret;
 
        /* Set the event of Read/Write data count  */
        switch (id) {
        case PPMU_PMNCNT0:
        case PPMU_PMNCNT1:
        case PPMU_PMNCNT2:
-               __raw_writel(PPMU_V2_RO_DATA_CNT | PPMU_V2_WO_DATA_CNT,
-                               info->ppmu.base + PPMU_V2_CH_EVx_TYPE(id));
+               ret = regmap_write(info->regmap, PPMU_V2_CH_EVx_TYPE(id),
+                               PPMU_V2_RO_DATA_CNT | PPMU_V2_WO_DATA_CNT);
+               if (ret < 0)
+                       return ret;
                break;
        case PPMU_PMNCNT3:
-               __raw_writel(PPMU_V2_EVT3_RW_DATA_CNT,
-                               info->ppmu.base + PPMU_V2_CH_EVx_TYPE(id));
+               ret = regmap_write(info->regmap, PPMU_V2_CH_EVx_TYPE(id),
+                               PPMU_V2_EVT3_RW_DATA_CNT);
+               if (ret < 0)
+                       return ret;
                break;
        }
 
        /* Reset cycle counter/performance counter and enable PPMU */
-       pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
+       ret = regmap_read(info->regmap, PPMU_V2_PMNC, &pmnc);
+       if (ret < 0)
+               return ret;
+
        pmnc &= ~(PPMU_PMNC_ENABLE_MASK
                        | PPMU_PMNC_COUNTER_RESET_MASK
                        | PPMU_PMNC_CC_RESET_MASK
@@ -284,7 +400,10 @@ static int exynos_ppmu_v2_set_event(struct devfreq_event_dev *edev)
        pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
        pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
        pmnc |= (PPMU_V2_MODE_MANUAL << PPMU_V2_PMNC_START_MODE_SHIFT);
-       __raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);
+
+       ret = regmap_write(info->regmap, PPMU_V2_PMNC, pmnc);
+       if (ret < 0)
+               return ret;
 
        return 0;
 }
@@ -294,37 +413,61 @@ static int exynos_ppmu_v2_get_event(struct devfreq_event_dev *edev,
 {
        struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
        int id = exynos_ppmu_find_ppmu_id(edev);
-       u32 pmnc, cntenc;
-       u32 pmcnt_high, pmcnt_low;
-       u64 load_count = 0;
+       int ret;
+       unsigned int pmnc, cntenc;
+       unsigned int pmcnt_high, pmcnt_low;
+       unsigned int total_count, count;
+       unsigned long load_count = 0;
 
        /* Disable PPMU */
-       pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
+       ret = regmap_read(info->regmap, PPMU_V2_PMNC, &pmnc);
+       if (ret < 0)
+               return ret;
+
        pmnc &= ~PPMU_PMNC_ENABLE_MASK;
-       __raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);
+       ret = regmap_write(info->regmap, PPMU_V2_PMNC, pmnc);
+       if (ret < 0)
+               return ret;
 
        /* Read cycle count and performance count */
-       edata->total_count = __raw_readl(info->ppmu.base + PPMU_V2_CCNT);
+       ret = regmap_read(info->regmap, PPMU_V2_CCNT, &total_count);
+       if (ret < 0)
+               return ret;
+       edata->total_count = total_count;
 
        switch (id) {
        case PPMU_PMNCNT0:
        case PPMU_PMNCNT1:
        case PPMU_PMNCNT2:
-               load_count = __raw_readl(info->ppmu.base + PPMU_V2_PMNCT(id));
+               ret = regmap_read(info->regmap, PPMU_V2_PMNCT(id), &count);
+               if (ret < 0)
+                       return ret;
+               load_count = count;
                break;
        case PPMU_PMNCNT3:
-               pmcnt_high = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_HIGH);
-               pmcnt_low = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_LOW);
-               load_count = ((u64)((pmcnt_high & 0xff)) << 32)
-                          + (u64)pmcnt_low;
+               ret = regmap_read(info->regmap, PPMU_V2_PMCNT3_HIGH,
+                                               &pmcnt_high);
+               if (ret < 0)
+                       return ret;
+
+               ret = regmap_read(info->regmap, PPMU_V2_PMCNT3_LOW, &pmcnt_low);
+               if (ret < 0)
+                       return ret;
+
+               load_count = ((u64)((pmcnt_high & 0xff)) << 32)+ (u64)pmcnt_low;
                break;
        }
        edata->load_count = load_count;
 
        /* Disable all counters */
-       cntenc = __raw_readl(info->ppmu.base + PPMU_V2_CNTENC);
+       ret = regmap_read(info->regmap, PPMU_V2_CNTENC, &cntenc);
+       if (ret < 0)
+               return 0;
+
        cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
-       __raw_writel(cntenc, info->ppmu.base + PPMU_V2_CNTENC);
+       ret = regmap_write(info->regmap, PPMU_V2_CNTENC, cntenc);
+       if (ret < 0)
+               return ret;
 
        dev_dbg(&edev->dev, "%25s (load: %ld / %ld)\n", edev->desc->name,
                                        edata->load_count, edata->total_count);
@@ -411,10 +554,19 @@ static int of_get_devfreq_events(struct device_node *np,
        return 0;
 }
 
-static int exynos_ppmu_parse_dt(struct exynos_ppmu *info)
+static struct regmap_config exynos_ppmu_regmap_config = {
+       .reg_bits = 32,
+       .val_bits = 32,
+       .reg_stride = 4,
+};
+
+static int exynos_ppmu_parse_dt(struct platform_device *pdev,
+                               struct exynos_ppmu *info)
 {
        struct device *dev = info->dev;
        struct device_node *np = dev->of_node;
+       struct resource *res;
+       void __iomem *base;
        int ret = 0;
 
        if (!np) {
@@ -423,10 +575,17 @@ static int exynos_ppmu_parse_dt(struct exynos_ppmu *info)
        }
 
        /* Maps the memory mapped IO to control PPMU register */
-       info->ppmu.base = of_iomap(np, 0);
-       if (IS_ERR_OR_NULL(info->ppmu.base)) {
-               dev_err(dev, "failed to map memory region\n");
-               return -ENOMEM;
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+
+       exynos_ppmu_regmap_config.max_register = resource_size(res) - 4;
+       info->regmap = devm_regmap_init_mmio(dev, base,
+                                       &exynos_ppmu_regmap_config);
+       if (IS_ERR(info->regmap)) {
+               dev_err(dev, "failed to initialize regmap\n");
+               return PTR_ERR(info->regmap);
        }
 
        info->ppmu.clk = devm_clk_get(dev, "ppmu");
@@ -438,15 +597,10 @@ static int exynos_ppmu_parse_dt(struct exynos_ppmu *info)
        ret = of_get_devfreq_events(np, info);
        if (ret < 0) {
                dev_err(dev, "failed to parse exynos ppmu dt node\n");
-               goto err;
+               return ret;
        }
 
        return 0;
-
-err:
-       iounmap(info->ppmu.base);
-
-       return ret;
 }
 
 static int exynos_ppmu_probe(struct platform_device *pdev)
@@ -463,7 +617,7 @@ static int exynos_ppmu_probe(struct platform_device *pdev)
        info->dev = &pdev->dev;
 
        /* Parse dt data to get resource */
-       ret = exynos_ppmu_parse_dt(info);
+       ret = exynos_ppmu_parse_dt(pdev, info);
        if (ret < 0) {
                dev_err(&pdev->dev,
                        "failed to parse devicetree for resource\n");
@@ -476,8 +630,7 @@ static int exynos_ppmu_probe(struct platform_device *pdev)
        if (!info->edev) {
                dev_err(&pdev->dev,
                        "failed to allocate memory devfreq-event devices\n");
-               ret = -ENOMEM;
-               goto err;
+               return -ENOMEM;
        }
        edev = info->edev;
        platform_set_drvdata(pdev, info);
@@ -488,17 +641,16 @@ static int exynos_ppmu_probe(struct platform_device *pdev)
                        ret = PTR_ERR(edev[i]);
                        dev_err(&pdev->dev,
                                "failed to add devfreq-event device\n");
-                       goto err;
+                       return PTR_ERR(edev[i]);
                }
+
+               pr_info("exynos-ppmu: new PPMU device registered %s (%s)\n",
+                       dev_name(&pdev->dev), desc[i].name);
        }
 
        clk_prepare_enable(info->ppmu.clk);
 
        return 0;
-err:
-       iounmap(info->ppmu.base);
-
-       return ret;
 }
 
 static int exynos_ppmu_remove(struct platform_device *pdev)
@@ -506,7 +658,6 @@ static int exynos_ppmu_remove(struct platform_device *pdev)
        struct exynos_ppmu *info = platform_get_drvdata(pdev);
 
        clk_disable_unprepare(info->ppmu.clk);
-       iounmap(info->ppmu.base);
 
        return 0;
 }
index c6d850cddd9881ef73b2516476eb2c955a66a880..49f68929e024fa9fc95b8f31eda07edd5218bb53 100644 (file)
@@ -146,8 +146,8 @@ static int exynos_bus_target(struct device *dev, unsigned long *freq, u32 flags)
        }
        bus->curr_freq = new_freq;
 
-       dev_dbg(dev, "Set the frequency of bus (%lukHz -> %lukHz)\n",
-                       old_freq/1000, new_freq/1000);
+       dev_dbg(dev, "Set the frequency of bus (%luHz -> %luHz, %luHz)\n",
+                       old_freq, new_freq, clk_get_rate(bus->clk));
 out:
        mutex_unlock(&bus->lock);
 
@@ -239,8 +239,8 @@ static int exynos_bus_passive_target(struct device *dev, unsigned long *freq,
        *freq = new_freq;
        bus->curr_freq = new_freq;
 
-       dev_dbg(dev, "Set the frequency of bus (%lukHz -> %lukHz)\n",
-                       old_freq/1000, new_freq/1000);
+       dev_dbg(dev, "Set the frequency of bus (%luHz -> %luHz, %luHz)\n",
+                       old_freq, new_freq, clk_get_rate(bus->clk));
 out:
        mutex_unlock(&bus->lock);
 
index fad7d63219786387e191ea4130bf5a0b7b37001b..71576b8bdfeff2f5fc5a1e5108eef21c35396949 100644 (file)
@@ -38,4 +38,6 @@ extern void devfreq_interval_update(struct devfreq *devfreq,
 extern int devfreq_add_governor(struct devfreq_governor *governor);
 extern int devfreq_remove_governor(struct devfreq_governor *governor);
 
+extern int devfreq_update_status(struct devfreq *devfreq, unsigned long freq);
+
 #endif /* _GOVERNOR_H */
index bd452236dba438e487a608835effa9f4aeccc80c..673ad8cc9a1d093e3db6e2d335742a1aee529953 100644 (file)
@@ -112,6 +112,11 @@ static int update_devfreq_passive(struct devfreq *devfreq, unsigned long freq)
        if (ret < 0)
                goto out;
 
+       if (devfreq->profile->freq_table
+               && (devfreq_update_status(devfreq, freq)))
+               dev_err(&devfreq->dev,
+                       "Couldn't update frequency transition information.\n");
+
        devfreq->previous_freq = freq;
 
 out:
@@ -179,6 +184,7 @@ static int devfreq_passive_event_handler(struct devfreq *devfreq,
 
 static struct devfreq_governor devfreq_passive = {
        .name = "passive",
+       .immutable = 1,
        .get_target_freq = devfreq_passive_get_target_freq,
        .event_handler = devfreq_passive_event_handler,
 };
index 35de6e83c1febedd99f8c155e374b5d4f8bb5202..176976068bcd1552d0a625ea47ebdf72812b4c6b 100644 (file)
@@ -1,5 +1,5 @@
 /*
- *  linux/drivers/devfreq/governor_simpleondemand.c
+ *  linux/drivers/devfreq/governor_userspace.c
  *
  *  Copyright (C) 2011 Samsung Electronics
  *     MyungJoo Ham <myungjoo.ham@samsung.com>
@@ -50,7 +50,6 @@ static ssize_t store_freq(struct device *dev, struct device_attribute *attr,
        unsigned long wanted;
        int err = 0;
 
-
        mutex_lock(&devfreq->lock);
        data = devfreq->data;
 
@@ -112,7 +111,13 @@ out:
 
 static void userspace_exit(struct devfreq *devfreq)
 {
-       sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group);
+       /*
+        * Remove the sysfs entry, unless this is being called after
+        * device_del(), which should have done this already via kobject_del().
+        */
+       if (devfreq->dev.kobj.sd)
+               sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group);
+
        kfree(devfreq->data);
        devfreq->data = NULL;
 }
index d5ba43a87a682b6e718d5e2ad7c804498bad61de..200828c60db9ffce12b968956271954a88ac54d0 100644 (file)
@@ -153,6 +153,8 @@ struct cppi41_dd {
 
        /* context for suspend/resume */
        unsigned int dma_tdfdq;
+
+       bool is_suspended;
 };
 
 #define FIST_COMPLETION_QUEUE  93
@@ -257,6 +259,10 @@ static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
        BUG_ON(desc_num >= ALLOC_DECS_NUM);
        c = cdd->chan_busy[desc_num];
        cdd->chan_busy[desc_num] = NULL;
+
+       /* Usecount for chan_busy[], paired with push_desc_queue() */
+       pm_runtime_put(cdd->ddev.dev);
+
        return c;
 }
 
@@ -317,12 +323,12 @@ static irqreturn_t cppi41_irq(int irq, void *data)
 
                while (val) {
                        u32 desc, len;
-                       int error;
 
-                       error = pm_runtime_get(cdd->ddev.dev);
-                       if (error < 0)
-                               dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
-                                       __func__, error);
+                       /*
+                        * This should never trigger, see the comments in
+                        * push_desc_queue()
+                        */
+                       WARN_ON(cdd->is_suspended);
 
                        q_num = __fls(val);
                        val &= ~(1 << q_num);
@@ -343,9 +349,6 @@ static irqreturn_t cppi41_irq(int irq, void *data)
                        c->residue = pd_trans_len(c->desc->pd6) - len;
                        dma_cookie_complete(&c->txd);
                        dmaengine_desc_get_callback_invoke(&c->txd, NULL);
-
-                       pm_runtime_mark_last_busy(cdd->ddev.dev);
-                       pm_runtime_put_autosuspend(cdd->ddev.dev);
                }
        }
        return IRQ_HANDLED;
@@ -447,6 +450,15 @@ static void push_desc_queue(struct cppi41_channel *c)
         */
        __iowmb();
 
+       /*
+        * DMA transfers can take at least 200ms to complete with USB mass
+        * storage connected. To prevent autosuspend timeouts, we must use
+        * pm_runtime_get/put() when chan_busy[] is modified. This will get
+        * cleared in desc_to_chan() or cppi41_stop_chan() depending on the
+        * outcome of the transfer.
+        */
+       pm_runtime_get(cdd->ddev.dev);
+
        desc_phys = lower_32_bits(c->desc_phys);
        desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
        WARN_ON(cdd->chan_busy[desc_num]);
@@ -457,20 +469,26 @@ static void push_desc_queue(struct cppi41_channel *c)
        cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
 }
 
-static void pending_desc(struct cppi41_channel *c)
+/*
+ * Caller must hold cdd->lock to prevent push_desc_queue()
+ * getting called out of order. We have both cppi41_dma_issue_pending()
+ * and cppi41_runtime_resume() call this function.
+ */
+static void cppi41_run_queue(struct cppi41_dd *cdd)
 {
-       struct cppi41_dd *cdd = c->cdd;
-       unsigned long flags;
+       struct cppi41_channel *c, *_c;
 
-       spin_lock_irqsave(&cdd->lock, flags);
-       list_add_tail(&c->node, &cdd->pending);
-       spin_unlock_irqrestore(&cdd->lock, flags);
+       list_for_each_entry_safe(c, _c, &cdd->pending, node) {
+               push_desc_queue(c);
+               list_del(&c->node);
+       }
 }
 
 static void cppi41_dma_issue_pending(struct dma_chan *chan)
 {
        struct cppi41_channel *c = to_cpp41_chan(chan);
        struct cppi41_dd *cdd = c->cdd;
+       unsigned long flags;
        int error;
 
        error = pm_runtime_get(cdd->ddev.dev);
@@ -482,10 +500,11 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
                return;
        }
 
-       if (likely(pm_runtime_active(cdd->ddev.dev)))
-               push_desc_queue(c);
-       else
-               pending_desc(c);
+       spin_lock_irqsave(&cdd->lock, flags);
+       list_add_tail(&c->node, &cdd->pending);
+       if (!cdd->is_suspended)
+               cppi41_run_queue(cdd);
+       spin_unlock_irqrestore(&cdd->lock, flags);
 
        pm_runtime_mark_last_busy(cdd->ddev.dev);
        pm_runtime_put_autosuspend(cdd->ddev.dev);
@@ -705,6 +724,9 @@ static int cppi41_stop_chan(struct dma_chan *chan)
        WARN_ON(!cdd->chan_busy[desc_num]);
        cdd->chan_busy[desc_num] = NULL;
 
+       /* Usecount for chan_busy[], paired with push_desc_queue() */
+       pm_runtime_put(cdd->ddev.dev);
+
        return 0;
 }
 
@@ -1150,8 +1172,12 @@ static int __maybe_unused cppi41_resume(struct device *dev)
 static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
 {
        struct cppi41_dd *cdd = dev_get_drvdata(dev);
+       unsigned long flags;
 
+       spin_lock_irqsave(&cdd->lock, flags);
+       cdd->is_suspended = true;
        WARN_ON(!list_empty(&cdd->pending));
+       spin_unlock_irqrestore(&cdd->lock, flags);
 
        return 0;
 }
@@ -1159,14 +1185,11 @@ static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
 static int __maybe_unused cppi41_runtime_resume(struct device *dev)
 {
        struct cppi41_dd *cdd = dev_get_drvdata(dev);
-       struct cppi41_channel *c, *_c;
        unsigned long flags;
 
        spin_lock_irqsave(&cdd->lock, flags);
-       list_for_each_entry_safe(c, _c, &cdd->pending, node) {
-               push_desc_queue(c);
-               list_del(&c->node);
-       }
+       cdd->is_suspended = false;
+       cppi41_run_queue(cdd);
        spin_unlock_irqrestore(&cdd->lock, flags);
 
        return 0;
index 740bbb942594873b08deecb59c801460dcc868ab..f37f4978dabbb2e43dab75d95255cafa398b0c73 100644 (file)
@@ -1699,7 +1699,6 @@ static bool _chan_ns(const struct pl330_dmac *pl330, int i)
 static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
 {
        struct pl330_thread *thrd = NULL;
-       unsigned long flags;
        int chans, i;
 
        if (pl330->state == DYING)
@@ -1707,8 +1706,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
 
        chans = pl330->pcfg.num_chan;
 
-       spin_lock_irqsave(&pl330->lock, flags);
-
        for (i = 0; i < chans; i++) {
                thrd = &pl330->channels[i];
                if ((thrd->free) && (!_manager_ns(thrd) ||
@@ -1726,8 +1723,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
                thrd = NULL;
        }
 
-       spin_unlock_irqrestore(&pl330->lock, flags);
-
        return thrd;
 }
 
@@ -1745,7 +1740,6 @@ static inline void _free_event(struct pl330_thread *thrd, int ev)
 static void pl330_release_channel(struct pl330_thread *thrd)
 {
        struct pl330_dmac *pl330;
-       unsigned long flags;
 
        if (!thrd || thrd->free)
                return;
@@ -1757,10 +1751,8 @@ static void pl330_release_channel(struct pl330_thread *thrd)
 
        pl330 = thrd->dmac;
 
-       spin_lock_irqsave(&pl330->lock, flags);
        _free_event(thrd, thrd->ev);
        thrd->free = true;
-       spin_unlock_irqrestore(&pl330->lock, flags);
 }
 
 /* Initialize the structure for PL330 configuration, that can be used
@@ -1867,9 +1859,10 @@ static int dmac_alloc_resources(struct pl330_dmac *pl330)
         * Alloc MicroCode buffer for 'chans' Channel threads.
         * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
         */
-       pl330->mcode_cpu = dma_alloc_coherent(pl330->ddma.dev,
+       pl330->mcode_cpu = dma_alloc_attrs(pl330->ddma.dev,
                                chans * pl330->mcbufsz,
-                               &pl330->mcode_bus, GFP_KERNEL);
+                               &pl330->mcode_bus, GFP_KERNEL,
+                               DMA_ATTR_PRIVILEGED);
        if (!pl330->mcode_cpu) {
                dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n",
                        __func__, __LINE__);
@@ -2122,20 +2115,20 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
        struct pl330_dmac *pl330 = pch->dmac;
        unsigned long flags;
 
-       spin_lock_irqsave(&pch->lock, flags);
+       spin_lock_irqsave(&pl330->lock, flags);
 
        dma_cookie_init(chan);
        pch->cyclic = false;
 
        pch->thread = pl330_request_channel(pl330);
        if (!pch->thread) {
-               spin_unlock_irqrestore(&pch->lock, flags);
+               spin_unlock_irqrestore(&pl330->lock, flags);
                return -ENOMEM;
        }
 
        tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
 
-       spin_unlock_irqrestore(&pch->lock, flags);
+       spin_unlock_irqrestore(&pl330->lock, flags);
 
        return 1;
 }
@@ -2238,12 +2231,13 @@ static int pl330_pause(struct dma_chan *chan)
 static void pl330_free_chan_resources(struct dma_chan *chan)
 {
        struct dma_pl330_chan *pch = to_pchan(chan);
+       struct pl330_dmac *pl330 = pch->dmac;
        unsigned long flags;
 
        tasklet_kill(&pch->task);
 
        pm_runtime_get_sync(pch->dmac->ddma.dev);
-       spin_lock_irqsave(&pch->lock, flags);
+       spin_lock_irqsave(&pl330->lock, flags);
 
        pl330_release_channel(pch->thread);
        pch->thread = NULL;
@@ -2251,7 +2245,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
        if (pch->cyclic)
                list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
 
-       spin_unlock_irqrestore(&pch->lock, flags);
+       spin_unlock_irqrestore(&pl330->lock, flags);
        pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
        pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
 }
index 26025117783087541af87e31b4e3d57a0e20cf21..82dab1692264d04baeb409622d54bbd895e013ff 100644 (file)
@@ -3065,6 +3065,8 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid)
                /* Check whether at least one UMC is enabled: */
                if (umc_en_mask)
                        ecc_en = umc_en_mask == ecc_en_mask;
+               else
+                       edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);
 
                /* Assume UMC MCA banks are enabled. */
                nb_mce_en = true;
@@ -3075,14 +3077,15 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid)
 
                nb_mce_en = nb_mce_bank_enabled_on_node(nid);
                if (!nb_mce_en)
-                       amd64_notice("NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
+                       edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
                                     MSR_IA32_MCG_CTL, nid);
        }
 
-       amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
+       amd64_info("Node %d: DRAM ECC %s.\n",
+                  nid, (ecc_en ? "enabled" : "disabled"));
 
        if (!ecc_en || !nb_mce_en) {
-               amd64_notice("%s", ecc_msg);
+               amd64_info("%s", ecc_msg);
                return false;
        }
        return true;
@@ -3300,15 +3303,6 @@ static int init_one_instance(unsigned int nid)
                goto err_add_mc;
        }
 
-       /* register stuff with EDAC MCE */
-       if (report_gart_errors)
-               amd_report_gart_errors(true);
-
-       if (pvt->umc)
-               amd_register_ecc_decoder(decode_umc_error);
-       else
-               amd_register_ecc_decoder(decode_bus_error);
-
        return 0;
 
 err_add_mc:
@@ -3342,7 +3336,7 @@ static int probe_one_instance(unsigned int nid)
        ecc_stngs[nid] = s;
 
        if (!ecc_enabled(F3, nid)) {
-               ret = -ENODEV;
+               ret = 0;
 
                if (!ecc_enable_override)
                        goto err_enable;
@@ -3363,6 +3357,8 @@ static int probe_one_instance(unsigned int nid)
 
                if (boot_cpu_data.x86 < 0x17)
                        restore_ecc_error_reporting(s, nid, F3);
+
+               goto err_enable;
        }
 
        return ret;
@@ -3396,14 +3392,6 @@ static void remove_one_instance(unsigned int nid)
 
        free_mc_sibling_devs(pvt);
 
-       /* unregister from EDAC MCE */
-       amd_report_gart_errors(false);
-
-       if (pvt->umc)
-               amd_unregister_ecc_decoder(decode_umc_error);
-       else
-               amd_unregister_ecc_decoder(decode_bus_error);
-
        kfree(ecc_stngs[nid]);
        ecc_stngs[nid] = NULL;
 
@@ -3452,8 +3440,11 @@ static int __init amd64_edac_init(void)
        int err = -ENODEV;
        int i;
 
+       if (!x86_match_cpu(amd64_cpuids))
+               return -ENODEV;
+
        if (amd_cache_northbridges() < 0)
-               goto err_ret;
+               return -ENODEV;
 
        opstate_init();
 
@@ -3466,14 +3457,30 @@ static int __init amd64_edac_init(void)
        if (!msrs)
                goto err_free;
 
-       for (i = 0; i < amd_nb_num(); i++)
-               if (probe_one_instance(i)) {
+       for (i = 0; i < amd_nb_num(); i++) {
+               err = probe_one_instance(i);
+               if (err) {
                        /* unwind properly */
                        while (--i >= 0)
                                remove_one_instance(i);
 
                        goto err_pci;
                }
+       }
+
+       if (!edac_has_mcs()) {
+               err = -ENODEV;
+               goto err_pci;
+       }
+
+       /* register stuff with EDAC MCE */
+       if (report_gart_errors)
+               amd_report_gart_errors(true);
+
+       if (boot_cpu_data.x86 >= 0x17)
+               amd_register_ecc_decoder(decode_umc_error);
+       else
+               amd_register_ecc_decoder(decode_bus_error);
 
        setup_pci_device();
 
@@ -3493,7 +3500,6 @@ err_free:
        kfree(ecc_stngs);
        ecc_stngs = NULL;
 
-err_ret:
        return err;
 }
 
@@ -3504,6 +3510,14 @@ static void __exit amd64_edac_exit(void)
        if (pci_ctl)
                edac_pci_release_generic_ctl(pci_ctl);
 
+       /* unregister from EDAC MCE */
+       amd_report_gart_errors(false);
+
+       if (boot_cpu_data.x86 >= 0x17)
+               amd_unregister_ecc_decoder(decode_umc_error);
+       else
+               amd_unregister_ecc_decoder(decode_bus_error);
+
        for (i = 0; i < amd_nb_num(); i++)
                remove_one_instance(i);
 
index 496603d8f3d20f9e315fb4d99b66a762cf941856..1d4b74e9a037f70bd68bd8e8c615976691c52f9c 100644 (file)
 #include <linux/slab.h>
 #include <linux/mmzone.h>
 #include <linux/edac.h>
+#include <asm/cpu_device_id.h>
 #include <asm/msr.h>
 #include "edac_module.h"
 #include "mce_amd.h"
 
-#define amd64_debug(fmt, arg...) \
-       edac_printk(KERN_DEBUG, "amd64", fmt, ##arg)
-
 #define amd64_info(fmt, arg...) \
        edac_printk(KERN_INFO, "amd64", fmt, ##arg)
 
-#define amd64_notice(fmt, arg...) \
-       edac_printk(KERN_NOTICE, "amd64", fmt, ##arg)
-
 #define amd64_warn(fmt, arg...) \
        edac_printk(KERN_WARNING, "amd64", "Warning: " fmt, ##arg)
 
@@ -90,7 +85,7 @@
  *         sections 3.5.4 and 3.5.5 for more information.
  */
 
-#define EDAC_AMD64_VERSION             "3.4.0"
+#define EDAC_AMD64_VERSION             "3.5.0"
 #define EDAC_MOD_STR                   "amd64_edac"
 
 /* Extended Model from CPUID, for CPU Revision numbers */
index 750891ea07de1573aba2d05a31bf8775b7d5ddf7..e5573c56b15e092df0eb0edf5d0d27490910f287 100644 (file)
@@ -453,6 +453,20 @@ void edac_mc_free(struct mem_ctl_info *mci)
 }
 EXPORT_SYMBOL_GPL(edac_mc_free);
 
+bool edac_has_mcs(void)
+{
+       bool ret;
+
+       mutex_lock(&mem_ctls_mutex);
+
+       ret = list_empty(&mc_devices);
+
+       mutex_unlock(&mem_ctls_mutex);
+
+       return !ret;
+}
+EXPORT_SYMBOL_GPL(edac_has_mcs);
+
 /* Caller must hold mem_ctls_mutex */
 static struct mem_ctl_info *__find_mci_by_dev(struct device *dev)
 {
index 50fc1dc9c0d837b2d661c1d6af1e3f2ff563adff..5357800e418d344b32d21d200c523803cbbcecbb 100644 (file)
@@ -148,6 +148,15 @@ extern int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
  */
 extern void edac_mc_free(struct mem_ctl_info *mci);
 
+/**
+ * edac_has_mcs() - Check if any MCs have been allocated.
+ *
+ * Returns:
+ *     True if MC instances have been registered successfully.
+ *     False otherwise.
+ */
+extern bool edac_has_mcs(void);
+
 /**
  * edac_mc_find() - Search for a mem_ctl_info structure whose index is @idx.
  *
index 39dbab7d62f150b7dd53a0657bb8beb3622bd762..445862dac273982147e7dc0464796fde6e34e129 100644 (file)
@@ -569,6 +569,40 @@ static ssize_t dimmdev_edac_mode_show(struct device *dev,
        return sprintf(data, "%s\n", edac_caps[dimm->edac_mode]);
 }
 
+static ssize_t dimmdev_ce_count_show(struct device *dev,
+                                     struct device_attribute *mattr,
+                                     char *data)
+{
+       struct dimm_info *dimm = to_dimm(dev);
+       u32 count;
+       int off;
+
+       off = EDAC_DIMM_OFF(dimm->mci->layers,
+                           dimm->mci->n_layers,
+                           dimm->location[0],
+                           dimm->location[1],
+                           dimm->location[2]);
+       count = dimm->mci->ce_per_layer[dimm->mci->n_layers-1][off];
+       return sprintf(data, "%u\n", count);
+}
+
+static ssize_t dimmdev_ue_count_show(struct device *dev,
+                                     struct device_attribute *mattr,
+                                     char *data)
+{
+       struct dimm_info *dimm = to_dimm(dev);
+       u32 count;
+       int off;
+
+       off = EDAC_DIMM_OFF(dimm->mci->layers,
+                           dimm->mci->n_layers,
+                           dimm->location[0],
+                           dimm->location[1],
+                           dimm->location[2]);
+       count = dimm->mci->ue_per_layer[dimm->mci->n_layers-1][off];
+       return sprintf(data, "%u\n", count);
+}
+
 /* dimm/rank attribute files */
 static DEVICE_ATTR(dimm_label, S_IRUGO | S_IWUSR,
                   dimmdev_label_show, dimmdev_label_store);
@@ -577,6 +611,8 @@ static DEVICE_ATTR(size, S_IRUGO, dimmdev_size_show, NULL);
 static DEVICE_ATTR(dimm_mem_type, S_IRUGO, dimmdev_mem_type_show, NULL);
 static DEVICE_ATTR(dimm_dev_type, S_IRUGO, dimmdev_dev_type_show, NULL);
 static DEVICE_ATTR(dimm_edac_mode, S_IRUGO, dimmdev_edac_mode_show, NULL);
+static DEVICE_ATTR(dimm_ce_count, S_IRUGO, dimmdev_ce_count_show, NULL);
+static DEVICE_ATTR(dimm_ue_count, S_IRUGO, dimmdev_ue_count_show, NULL);
 
 /* attributes of the dimm<id>/rank<id> object */
 static struct attribute *dimm_attrs[] = {
@@ -586,6 +622,8 @@ static struct attribute *dimm_attrs[] = {
        &dev_attr_dimm_mem_type.attr,
        &dev_attr_dimm_dev_type.attr,
        &dev_attr_dimm_edac_mode.attr,
+       &dev_attr_dimm_ce_count.attr,
+       &dev_attr_dimm_ue_count.attr,
        NULL,
 };
 
@@ -831,7 +869,7 @@ static DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL);
 static DEVICE_ATTR(max_location, S_IRUGO, mci_max_location_show, NULL);
 
 /* memory scrubber attribute file */
-DEVICE_ATTR(sdram_scrub_rate, 0, mci_sdram_scrub_rate_show,
+static DEVICE_ATTR(sdram_scrub_rate, 0, mci_sdram_scrub_rate_show,
            mci_sdram_scrub_rate_store); /* umode set later in is_visible */
 
 static struct attribute *mci_attrs[] = {
index 4e9608a958e78a998ac7b858b857e0c13a98e4fb..efc8276d1d9cc83610a84d5fd6740bf8cd638bd4 100644 (file)
@@ -145,12 +145,12 @@ static ssize_t fsl_mc_inject_ctrl_store(struct device *dev,
        return 0;
 }
 
-DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR,
-           fsl_mc_inject_data_hi_show, fsl_mc_inject_data_hi_store);
-DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR,
-           fsl_mc_inject_data_lo_show, fsl_mc_inject_data_lo_store);
-DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR,
-           fsl_mc_inject_ctrl_show, fsl_mc_inject_ctrl_store);
+static DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR,
+                  fsl_mc_inject_data_hi_show, fsl_mc_inject_data_hi_store);
+static DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR,
+                  fsl_mc_inject_data_lo_show, fsl_mc_inject_data_lo_store);
+static DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR,
+                  fsl_mc_inject_ctrl_show, fsl_mc_inject_ctrl_store);
 
 static struct attribute *fsl_ddr_dev_attrs[] = {
        &dev_attr_inject_data_hi.attr,
index 0a912bf6de005f53e4484a5bfaf327f4b8cc2322..e391f5a716bed33cc0dcbb6533882e2d08eab7cd 100644 (file)
@@ -304,7 +304,6 @@ static const char *ferr_global_lo_name[] = {
 #define REDMEMA                0xdc
 
 #define REDMEMB                0x7c
-  #define IS_SECOND_CH(v)      ((v) * (1 << 17))
 
 #define RECMEMA                0xe0
   #define RECMEMA_BANK(v)      (((v) >> 12) & 7)
@@ -483,8 +482,9 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
                pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
                                     REDMEMB, &value);
                channel = (branch << 1);
-               if (IS_SECOND_CH(value))
-                       channel++;
+
+               /* Second channel ? */
+               channel += !!(value & BIT(17));
 
                /* Clear the error bit */
                pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
index 69b5adead0ad63e1ff5ea89a589715c5921f3c49..75ad847593b79561dd469273f93109fe68be573a 100644 (file)
@@ -1835,6 +1835,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
 
 static struct notifier_block i7_mce_dec = {
        .notifier_call  = i7core_mce_check_error,
+       .priority       = MCE_PRIO_EDAC,
 };
 
 struct memdev_dmi_entry {
index 7baa8ace267bb3350f93a4ba8b1eca91537183e8..9dcdab28f66535f9fadf8ad116eca7d059fd2f06 100644 (file)
@@ -494,6 +494,10 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
        }
        mchbar &= 0xffffc000;   /* bits 31:14 used for 16K window */
        mch_window = ioremap_nocache(mchbar, 0x1000);
+       if (!mch_window) {
+               edac_dbg(3, "error ioremapping MCHBAR!\n");
+               goto fail0;
+       }
 
 #ifdef i82975x_DEBUG_IOMEM
        i82975x_printk(KERN_INFO, "MCHBAR real = %0x, remapped = %p\n",
index 34208f38c5b1ba46aeb5077e8b536bf55609efbc..ba35b7ea3686027dd463ce1e893552e02a70106a 100644 (file)
@@ -937,12 +937,13 @@ static const char *decode_error_status(struct mce *m)
        }
 
        if (m->status & MCI_STATUS_DEFERRED)
-               return "Deferred error.";
+               return "Deferred error, no action required.";
 
        return "Corrected error, no action required.";
 }
 
-int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
+static int
+amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
 {
        struct mce *m = (struct mce *)data;
        struct cpuinfo_x86 *c = &cpu_data(m->extcpu);
@@ -991,20 +992,22 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
        pr_cont("]: 0x%016llx\n", m->status);
 
        if (m->status & MCI_STATUS_ADDRV)
-               pr_emerg(HW_ERR "Error Addr: 0x%016llx", m->addr);
+               pr_emerg(HW_ERR "Error Addr: 0x%016llx\n", m->addr);
 
        if (boot_cpu_has(X86_FEATURE_SMCA)) {
+               pr_emerg(HW_ERR "IPID: 0x%016llx", m->ipid);
+
                if (m->status & MCI_STATUS_SYNDV)
                        pr_cont(", Syndrome: 0x%016llx", m->synd);
 
-               pr_cont(", IPID: 0x%016llx", m->ipid);
-
                pr_cont("\n");
 
                decode_smca_errors(m);
                goto err_code;
-       } else
-               pr_cont("\n");
+       }
+
+       if (m->tsc)
+               pr_emerg(HW_ERR "TSC: %llu\n", m->tsc);
 
        if (!fam_ops)
                goto err_code;
@@ -1047,10 +1050,10 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
 
        return NOTIFY_STOP;
 }
-EXPORT_SYMBOL_GPL(amd_decode_mce);
 
 static struct notifier_block amd_mce_dec_nb = {
        .notifier_call  = amd_decode_mce,
+       .priority       = MCE_PRIO_EDAC,
 };
 
 static int __init mce_amd_init(void)
index c2359a1ea6b300443f750624fd4cb6beb295d05c..0b6a68673e0e3a977ef3e9d9ecb600125b5dbade 100644 (file)
@@ -79,6 +79,5 @@ struct amd_decoder_ops {
 void amd_report_gart_errors(bool);
 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
 void amd_unregister_ecc_decoder(void (*f)(int, struct mce *));
-int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data);
 
 #endif /* _EDAC_MCE_AMD_H */
index 8f66cbed70b75b97bdf0492f1f0b0599911649a1..67f7bc3fe5b3b623b108fddc8cb57ec6ba304ade 100644 (file)
@@ -629,6 +629,7 @@ static const struct of_device_id mpc85xx_l2_err_of_match[] = {
        { .compatible = "fsl,p1020-l2-cache-controller", },
        { .compatible = "fsl,p1021-l2-cache-controller", },
        { .compatible = "fsl,p2020-l2-cache-controller", },
+       { .compatible = "fsl,t2080-l2-cache-controller", },
        {},
 };
 MODULE_DEVICE_TABLE(of, mpc85xx_l2_err_of_match);
index 54ae6dc45ab2216343b0c542765537889332a0fb..a65ea44e3b0bf66294c0c32effb774c9d3c23622 100644 (file)
@@ -304,7 +304,6 @@ struct sbridge_info {
        u64             (*rir_limit)(u32 reg);
        u64             (*sad_limit)(u32 reg);
        u32             (*interleave_mode)(u32 reg);
-       char*           (*show_interleave_mode)(u32 reg);
        u32             (*dram_attr)(u32 reg);
        const u32       *dram_rule;
        const u32       *interleave_list;
@@ -811,11 +810,6 @@ static u32 interleave_mode(u32 reg)
        return GET_BITFIELD(reg, 1, 1);
 }
 
-char *show_interleave_mode(u32 reg)
-{
-       return interleave_mode(reg) ? "8:6" : "[8:6]XOR[18:16]";
-}
-
 static u32 dram_attr(u32 reg)
 {
        return GET_BITFIELD(reg, 2, 3);
@@ -831,29 +825,16 @@ static u32 knl_interleave_mode(u32 reg)
        return GET_BITFIELD(reg, 1, 2);
 }
 
-static char *knl_show_interleave_mode(u32 reg)
-{
-       char *s;
-
-       switch (knl_interleave_mode(reg)) {
-       case 0:
-               s = "use address bits [8:6]";
-               break;
-       case 1:
-               s = "use address bits [10:8]";
-               break;
-       case 2:
-               s = "use address bits [14:12]";
-               break;
-       case 3:
-               s = "use address bits [32:30]";
-               break;
-       default:
-               WARN_ON(1);
-               break;
-       }
+static const char * const knl_intlv_mode[] = {
+       "[8:6]", "[10:8]", "[14:12]", "[32:30]"
+};
 
-       return s;
+static const char *get_intlv_mode_str(u32 reg, enum type t)
+{
+       if (t == KNIGHTS_LANDING)
+               return knl_intlv_mode[knl_interleave_mode(reg)];
+       else
+               return interleave_mode(reg) ? "[8:6]" : "[8:6]XOR[18:16]";
 }
 
 static u32 dram_attr_knl(u32 reg)
@@ -1810,7 +1791,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
                         show_dram_attr(pvt->info.dram_attr(reg)),
                         gb, (mb*1000)/1024,
                         ((u64)tmp_mb) << 20L,
-                        pvt->info.show_interleave_mode(reg),
+                        get_intlv_mode_str(reg, pvt->info.type),
                         reg);
                prv = limit;
 
@@ -3136,7 +3117,8 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
 }
 
 static struct notifier_block sbridge_mce_dec = {
-       .notifier_call      = sbridge_mce_check_error,
+       .notifier_call  = sbridge_mce_check_error,
+       .priority       = MCE_PRIO_EDAC,
 };
 
 /****************************************************************************
@@ -3227,7 +3209,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
                pvt->info.rir_limit = rir_limit;
                pvt->info.sad_limit = sad_limit;
                pvt->info.interleave_mode = interleave_mode;
-               pvt->info.show_interleave_mode = show_interleave_mode;
                pvt->info.dram_attr = dram_attr;
                pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
                pvt->info.interleave_list = ibridge_interleave_list;
@@ -3251,7 +3232,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
                pvt->info.rir_limit = rir_limit;
                pvt->info.sad_limit = sad_limit;
                pvt->info.interleave_mode = interleave_mode;
-               pvt->info.show_interleave_mode = show_interleave_mode;
                pvt->info.dram_attr = dram_attr;
                pvt->info.max_sad = ARRAY_SIZE(sbridge_dram_rule);
                pvt->info.interleave_list = sbridge_interleave_list;
@@ -3275,7 +3255,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
                pvt->info.rir_limit = haswell_rir_limit;
                pvt->info.sad_limit = sad_limit;
                pvt->info.interleave_mode = interleave_mode;
-               pvt->info.show_interleave_mode = show_interleave_mode;
                pvt->info.dram_attr = dram_attr;
                pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
                pvt->info.interleave_list = ibridge_interleave_list;
@@ -3299,7 +3278,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
                pvt->info.rir_limit = haswell_rir_limit;
                pvt->info.sad_limit = sad_limit;
                pvt->info.interleave_mode = interleave_mode;
-               pvt->info.show_interleave_mode = show_interleave_mode;
                pvt->info.dram_attr = dram_attr;
                pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
                pvt->info.interleave_list = ibridge_interleave_list;
@@ -3323,7 +3301,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
                pvt->info.rir_limit = NULL;
                pvt->info.sad_limit = knl_sad_limit;
                pvt->info.interleave_mode = knl_interleave_mode;
-               pvt->info.show_interleave_mode = knl_show_interleave_mode;
                pvt->info.dram_attr = dram_attr_knl;
                pvt->info.max_sad = ARRAY_SIZE(knl_dram_rule);
                pvt->info.interleave_list = knl_interleave_list;
index 79ef675e4d6f0525478d9980477e6a42c18d5b33..1159dba4671fef926afce7164cd278faf23c2e24 100644 (file)
@@ -1007,7 +1007,8 @@ static int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
 }
 
 static struct notifier_block skx_mce_dec = {
-       .notifier_call = skx_mce_check_error,
+       .notifier_call  = skx_mce_check_error,
+       .priority       = MCE_PRIO_EDAC,
 };
 
 static void skx_remove(void)
index f853ad2c4ca0a7d42d888cbd95844d6b132eb925..1027d7b44358d3b5806e8dcd58c65fecdcab9664 100644 (file)
@@ -250,7 +250,6 @@ void __init efi_init(void)
        }
 
        reserve_regions();
-       efi_memattr_init();
        efi_esrt_init();
        efi_memmap_unmap();
 
index 92914801e3888775708a2c3843f5a6577529d9d0..e7d404059b7316a5c5668f609ceb5957fdcdd97d 100644 (file)
@@ -529,6 +529,8 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
                }
        }
 
+       efi_memattr_init();
+
        /* Parse the EFI Properties table if it exists */
        if (efi.properties_table != EFI_INVALID_TABLE_ADDR) {
                efi_properties_table_t *tbl;
index 14914074f716ed41cc3fccc42c1e333810ecf890..08b026864d4e7d5f00b76cbd95b7398c6db943f8 100644 (file)
@@ -269,7 +269,7 @@ void __init efi_esrt_init(void)
        max -= efi.esrt;
 
        if (max < size) {
-               pr_err("ESRT header doen't fit on single memory map entry. (size: %zu max: %zu)\n",
+               pr_err("ESRT header doesn't fit on single memory map entry. (size: %zu max: %zu)\n",
                       size, max);
                return;
        }
index d564d25df8abf14c8b00cafe861fed063e09af4b..f7425960f6a57d4d4048e905d4d6b101b333c114 100644 (file)
@@ -11,7 +11,7 @@ cflags-$(CONFIG_X86)          += -m$(BITS) -D__KERNEL__ -O2 \
                                   -mno-mmx -mno-sse
 
 cflags-$(CONFIG_ARM64)         := $(subst -pg,,$(KBUILD_CFLAGS))
-cflags-$(CONFIG_ARM)           := $(subst -pg,,$(KBUILD_CFLAGS)) -g0 \
+cflags-$(CONFIG_ARM)           := $(subst -pg,,$(KBUILD_CFLAGS)) \
                                   -fno-builtin -fpic -mno-single-pic-base
 
 cflags-$(CONFIG_EFI_ARMSTUB)   += -I$(srctree)/scripts/dtc/libfdt
@@ -28,7 +28,7 @@ OBJECT_FILES_NON_STANDARD     := y
 # Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
 KCOV_INSTRUMENT                        := n
 
-lib-y                          := efi-stub-helper.o gop.o
+lib-y                          := efi-stub-helper.o gop.o secureboot.o
 
 # include the stub's generic dependencies from lib/ when building for ARM/arm64
 arm-deps := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c sort.c
@@ -60,7 +60,7 @@ CFLAGS_arm64-stub.o           := -DTEXT_OFFSET=$(TEXT_OFFSET)
 extra-$(CONFIG_EFI_ARMSTUB)    := $(lib-y)
 lib-$(CONFIG_EFI_ARMSTUB)      := $(patsubst %.o,%.stub.o,$(lib-y))
 
-STUBCOPY_FLAGS-y               := -R .debug* -R *ksymtab* -R *kcrctab*
+STUBCOPY_RM-y                  := -R *ksymtab* -R *kcrctab*
 STUBCOPY_FLAGS-$(CONFIG_ARM64) += --prefix-alloc-sections=.init \
                                   --prefix-symbols=__efistub_
 STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS
@@ -68,17 +68,25 @@ STUBCOPY_RELOC-$(CONFIG_ARM64)      := R_AARCH64_ABS
 $(obj)/%.stub.o: $(obj)/%.o FORCE
        $(call if_changed,stubcopy)
 
+#
+# Strip debug sections and some other sections that may legally contain
+# absolute relocations, so that we can inspect the remaining sections for
+# such relocations. If none are found, regenerate the output object, but
+# this time, use objcopy and leave all sections in place.
+#
 quiet_cmd_stubcopy = STUBCPY $@
-      cmd_stubcopy = if $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@; then     \
-                    $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y)        \
-                    && (echo >&2 "$@: absolute symbol references not allowed in the EFI stub"; \
-                        rm -f $@; /bin/false); else /bin/false; fi
+      cmd_stubcopy = if $(STRIP) --strip-debug $(STUBCOPY_RM-y) -o $@ $<; \
+                    then if $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y); \
+                    then (echo >&2 "$@: absolute symbol references not allowed in the EFI stub"; \
+                          rm -f $@; /bin/false);                         \
+                    else $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@; fi        \
+                    else /bin/false; fi
 
 #
 # ARM discards the .data section because it disallows r/w data in the
 # decompressor. So move our .data to .data.efistub, which is preserved
 # explicitly by the decompressor linker script.
 #
-STUBCOPY_FLAGS-$(CONFIG_ARM)   += --rename-section .data=.data.efistub \
-                                  -R ___ksymtab+sort -R ___kcrctab+sort
+STUBCOPY_FLAGS-$(CONFIG_ARM)   += --rename-section .data=.data.efistub
+STUBCOPY_RM-$(CONFIG_ARM)      += -R ___ksymtab+sort -R ___kcrctab+sort
 STUBCOPY_RELOC-$(CONFIG_ARM)   := R_ARM_ABS
index b4f7d78f9e8bdeafb4e7bb408f3648aa9cacd45d..d4056c6be1ec0f029b7c0c457e34ab570356f0c6 100644 (file)
 
 bool __nokaslr;
 
-static int efi_get_secureboot(efi_system_table_t *sys_table_arg)
-{
-       static efi_char16_t const sb_var_name[] = {
-               'S', 'e', 'c', 'u', 'r', 'e', 'B', 'o', 'o', 't', 0 };
-       static efi_char16_t const sm_var_name[] = {
-               'S', 'e', 't', 'u', 'p', 'M', 'o', 'd', 'e', 0 };
-
-       efi_guid_t var_guid = EFI_GLOBAL_VARIABLE_GUID;
-       efi_get_variable_t *f_getvar = sys_table_arg->runtime->get_variable;
-       u8 val;
-       unsigned long size = sizeof(val);
-       efi_status_t status;
-
-       status = f_getvar((efi_char16_t *)sb_var_name, (efi_guid_t *)&var_guid,
-                         NULL, &size, &val);
-
-       if (status != EFI_SUCCESS)
-               goto out_efi_err;
-
-       if (val == 0)
-               return 0;
-
-       status = f_getvar((efi_char16_t *)sm_var_name, (efi_guid_t *)&var_guid,
-                         NULL, &size, &val);
-
-       if (status != EFI_SUCCESS)
-               goto out_efi_err;
-
-       if (val == 1)
-               return 0;
-
-       return 1;
-
-out_efi_err:
-       switch (status) {
-       case EFI_NOT_FOUND:
-               return 0;
-       case EFI_DEVICE_ERROR:
-               return -EIO;
-       case EFI_SECURITY_VIOLATION:
-               return -EACCES;
-       default:
-               return -EINVAL;
-       }
-}
-
 efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg,
                             void *__image, void **__fh)
 {
@@ -91,75 +45,6 @@ efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg,
        return status;
 }
 
-efi_status_t efi_file_close(void *handle)
-{
-       efi_file_handle_t *fh = handle;
-
-       return fh->close(handle);
-}
-
-efi_status_t
-efi_file_read(void *handle, unsigned long *size, void *addr)
-{
-       efi_file_handle_t *fh = handle;
-
-       return fh->read(handle, size, addr);
-}
-
-
-efi_status_t
-efi_file_size(efi_system_table_t *sys_table_arg, void *__fh,
-             efi_char16_t *filename_16, void **handle, u64 *file_sz)
-{
-       efi_file_handle_t *h, *fh = __fh;
-       efi_file_info_t *info;
-       efi_status_t status;
-       efi_guid_t info_guid = EFI_FILE_INFO_ID;
-       unsigned long info_sz;
-
-       status = fh->open(fh, &h, filename_16, EFI_FILE_MODE_READ, (u64)0);
-       if (status != EFI_SUCCESS) {
-               efi_printk(sys_table_arg, "Failed to open file: ");
-               efi_char16_printk(sys_table_arg, filename_16);
-               efi_printk(sys_table_arg, "\n");
-               return status;
-       }
-
-       *handle = h;
-
-       info_sz = 0;
-       status = h->get_info(h, &info_guid, &info_sz, NULL);
-       if (status != EFI_BUFFER_TOO_SMALL) {
-               efi_printk(sys_table_arg, "Failed to get file info size\n");
-               return status;
-       }
-
-grow:
-       status = sys_table_arg->boottime->allocate_pool(EFI_LOADER_DATA,
-                                info_sz, (void **)&info);
-       if (status != EFI_SUCCESS) {
-               efi_printk(sys_table_arg, "Failed to alloc mem for file info\n");
-               return status;
-       }
-
-       status = h->get_info(h, &info_guid, &info_sz,
-                                                  info);
-       if (status == EFI_BUFFER_TOO_SMALL) {
-               sys_table_arg->boottime->free_pool(info);
-               goto grow;
-       }
-
-       *file_sz = info->file_size;
-       sys_table_arg->boottime->free_pool(info);
-
-       if (status != EFI_SUCCESS)
-               efi_printk(sys_table_arg, "Failed to get initrd info\n");
-
-       return status;
-}
-
-
-
 void efi_char16_printk(efi_system_table_t *sys_table_arg,
                              efi_char16_t *str)
 {
@@ -226,7 +111,7 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
        efi_guid_t loaded_image_proto = LOADED_IMAGE_PROTOCOL_GUID;
        unsigned long reserve_addr = 0;
        unsigned long reserve_size = 0;
-       int secure_boot = 0;
+       enum efi_secureboot_mode secure_boot;
        struct screen_info *si;
 
        /* Check if we were booted by the EFI firmware */
@@ -296,19 +181,14 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
                pr_efi_err(sys_table, "Failed to parse EFI cmdline options\n");
 
        secure_boot = efi_get_secureboot(sys_table);
-       if (secure_boot > 0)
-               pr_efi(sys_table, "UEFI Secure Boot is enabled.\n");
-
-       if (secure_boot < 0) {
-               pr_efi_err(sys_table,
-                       "could not determine UEFI Secure Boot status.\n");
-       }
 
        /*
-        * Unauthenticated device tree data is a security hazard, so
-        * ignore 'dtb=' unless UEFI Secure Boot is disabled.
+        * Unauthenticated device tree data is a security hazard, so ignore
+        * 'dtb=' unless UEFI Secure Boot is disabled.  We assume that secure
+        * boot is enabled if we can't determine its state.
         */
-       if (secure_boot != 0 && strstr(cmdline_ptr, "dtb=")) {
+       if (secure_boot != efi_secureboot_mode_disabled &&
+           strstr(cmdline_ptr, "dtb=")) {
                pr_efi(sys_table, "Ignoring DTB from command line.\n");
        } else {
                status = handle_cmdline_files(sys_table, image, cmdline_ptr,
index 757badc1debbedbec5ecaa64c724c85b3b2e7632..919822b7773d5df20cd6009e3f336d48dfa2e191 100644 (file)
@@ -338,6 +338,69 @@ void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
        efi_call_early(free_pages, addr, nr_pages);
 }
 
+static efi_status_t efi_file_size(efi_system_table_t *sys_table_arg, void *__fh,
+                                 efi_char16_t *filename_16, void **handle,
+                                 u64 *file_sz)
+{
+       efi_file_handle_t *h, *fh = __fh;
+       efi_file_info_t *info;
+       efi_status_t status;
+       efi_guid_t info_guid = EFI_FILE_INFO_ID;
+       unsigned long info_sz;
+
+       status = efi_call_proto(efi_file_handle, open, fh, &h, filename_16,
+                               EFI_FILE_MODE_READ, (u64)0);
+       if (status != EFI_SUCCESS) {
+               efi_printk(sys_table_arg, "Failed to open file: ");
+               efi_char16_printk(sys_table_arg, filename_16);
+               efi_printk(sys_table_arg, "\n");
+               return status;
+       }
+
+       *handle = h;
+
+       info_sz = 0;
+       status = efi_call_proto(efi_file_handle, get_info, h, &info_guid,
+                               &info_sz, NULL);
+       if (status != EFI_BUFFER_TOO_SMALL) {
+               efi_printk(sys_table_arg, "Failed to get file info size\n");
+               return status;
+       }
+
+grow:
+       status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+                               info_sz, (void **)&info);
+       if (status != EFI_SUCCESS) {
+               efi_printk(sys_table_arg, "Failed to alloc mem for file info\n");
+               return status;
+       }
+
+       status = efi_call_proto(efi_file_handle, get_info, h, &info_guid,
+                               &info_sz, info);
+       if (status == EFI_BUFFER_TOO_SMALL) {
+               efi_call_early(free_pool, info);
+               goto grow;
+       }
+
+       *file_sz = info->file_size;
+       efi_call_early(free_pool, info);
+
+       if (status != EFI_SUCCESS)
+               efi_printk(sys_table_arg, "Failed to get initrd info\n");
+
+       return status;
+}
+
+static efi_status_t efi_file_read(void *handle, unsigned long *size, void *addr)
+{
+       return efi_call_proto(efi_file_handle, read, handle, size, addr);
+}
+
+static efi_status_t efi_file_close(void *handle)
+{
+       return efi_call_proto(efi_file_handle, close, handle);
+}
+
 /*
  * Parse the ASCII string 'cmdline' for EFI options, denoted by the efi=
  * option, e.g. efi=nochunk.
@@ -350,6 +413,14 @@ efi_status_t efi_parse_options(char *cmdline)
 {
        char *str;
 
+       /*
+        * Currently, the only efi= option we look for is 'nochunk', which
+        * is intended to work around known issues on certain x86 UEFI
+        * versions. So ignore for now on other architectures.
+        */
+       if (!IS_ENABLED(CONFIG_X86))
+               return EFI_SUCCESS;
+
        /*
         * If no EFI parameters were specified on the cmdline we've got
         * nothing to do.
@@ -523,7 +594,8 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
                        size = files[j].size;
                        while (size) {
                                unsigned long chunksize;
-                               if (size > __chunk_size)
+
+                               if (IS_ENABLED(CONFIG_X86) && size > __chunk_size)
                                        chunksize = __chunk_size;
                                else
                                        chunksize = size;
index 0e2a96b12cb3647635db19912ec0ab4004f71572..71c4d0e3c4ede196cffeebec1ccef97e868b3f26 100644 (file)
@@ -29,14 +29,6 @@ void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
 efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image,
                             void **__fh);
 
-efi_status_t efi_file_size(efi_system_table_t *sys_table_arg, void *__fh,
-                          efi_char16_t *filename_16, void **handle,
-                          u64 *file_sz);
-
-efi_status_t efi_file_read(void *handle, unsigned long *size, void *addr);
-
-efi_status_t efi_file_close(void *handle);
-
 unsigned long get_dram_base(efi_system_table_t *sys_table_arg);
 
 efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
index 921dfa047202952c9064cd39971e68e0e3c28b49..260c4b4b492ec38735715859522068da40c21381 100644 (file)
@@ -187,6 +187,7 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
 struct exit_boot_struct {
        efi_memory_desc_t *runtime_map;
        int *runtime_entry_count;
+       void *new_fdt_addr;
 };
 
 static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
@@ -202,7 +203,7 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
        efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
                        p->runtime_map, p->runtime_entry_count);
 
-       return EFI_SUCCESS;
+       return update_fdt_memmap(p->new_fdt_addr, map);
 }
 
 /*
@@ -300,22 +301,13 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
 
        priv.runtime_map = runtime_map;
        priv.runtime_entry_count = &runtime_entry_count;
+       priv.new_fdt_addr = (void *)*new_fdt_addr;
        status = efi_exit_boot_services(sys_table, handle, &map, &priv,
                                        exit_boot_func);
 
        if (status == EFI_SUCCESS) {
                efi_set_virtual_address_map_t *svam;
 
-               status = update_fdt_memmap((void *)*new_fdt_addr, &map);
-               if (status != EFI_SUCCESS) {
-                       /*
-                        * The kernel won't get far without the memory map, but
-                        * may still be able to print something meaningful so
-                        * return success here.
-                        */
-                       return EFI_SUCCESS;
-               }
-
                /* Install the new virtual address map */
                svam = sys_table->runtime->set_virtual_address_map;
                status = svam(runtime_entry_count * desc_size, desc_size,
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
new file mode 100644 (file)
index 0000000..6def402
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * Secure boot handling.
+ *
+ * Copyright (C) 2013,2014 Linaro Limited
+ *     Roy Franz <roy.franz@linaro.org
+ * Copyright (C) 2013 Red Hat, Inc.
+ *     Mark Salter <msalter@redhat.com>
+ *
+ * This file is part of the Linux kernel, and is made available under the
+ * terms of the GNU General Public License version 2.
+ */
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+/* BIOS variables */
+static const efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID;
+static const efi_char16_t const efi_SecureBoot_name[] = {
+       'S', 'e', 'c', 'u', 'r', 'e', 'B', 'o', 'o', 't', 0
+};
+static const efi_char16_t const efi_SetupMode_name[] = {
+       'S', 'e', 't', 'u', 'p', 'M', 'o', 'd', 'e', 0
+};
+
+/* SHIM variables */
+static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID;
+static efi_char16_t const shim_MokSBState_name[] = {
+       'M', 'o', 'k', 'S', 'B', 'S', 't', 'a', 't', 'e', 0
+};
+
+#define get_efi_var(name, vendor, ...) \
+       efi_call_runtime(get_variable, \
+                        (efi_char16_t *)(name), (efi_guid_t *)(vendor), \
+                        __VA_ARGS__);
+
+/*
+ * Determine whether we're in secure boot mode.
+ */
+enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg)
+{
+       u32 attr;
+       u8 secboot, setupmode, moksbstate;
+       unsigned long size;
+       efi_status_t status;
+
+       size = sizeof(secboot);
+       status = get_efi_var(efi_SecureBoot_name, &efi_variable_guid,
+                            NULL, &size, &secboot);
+       if (status != EFI_SUCCESS)
+               goto out_efi_err;
+
+       size = sizeof(setupmode);
+       status = get_efi_var(efi_SetupMode_name, &efi_variable_guid,
+                            NULL, &size, &setupmode);
+       if (status != EFI_SUCCESS)
+               goto out_efi_err;
+
+       if (secboot == 0 || setupmode == 1)
+               return efi_secureboot_mode_disabled;
+
+       /*
+        * See if a user has put the shim into insecure mode. If so, and if the
+        * variable doesn't have the runtime attribute set, we might as well
+        * honor that.
+        */
+       size = sizeof(moksbstate);
+       status = get_efi_var(shim_MokSBState_name, &shim_guid,
+                            &attr, &size, &moksbstate);
+
+       /* If it fails, we don't care why. Default to secure */
+       if (status != EFI_SUCCESS)
+               goto secure_boot_enabled;
+       if (!(attr & EFI_VARIABLE_RUNTIME_ACCESS) && moksbstate == 1)
+               return efi_secureboot_mode_disabled;
+
+secure_boot_enabled:
+       pr_efi(sys_table_arg, "UEFI Secure Boot is enabled.\n");
+       return efi_secureboot_mode_enabled;
+
+out_efi_err:
+       pr_efi_err(sys_table_arg, "Could not determine UEFI Secure Boot status.\n");
+       if (status == EFI_NOT_FOUND)
+               return efi_secureboot_mode_disabled;
+       return efi_secureboot_mode_unknown;
+}
index 236004b9a50d3224024d0024b8774b6347995065..8986757eafafa29dbf287b9e45b4de592cbc9570 100644 (file)
@@ -43,6 +43,7 @@ int __init efi_memattr_init(void)
 
        tbl_size = sizeof(*tbl) + tbl->num_entries * tbl->desc_size;
        memblock_reserve(efi.mem_attr_table, tbl_size);
+       set_bit(EFI_MEM_ATTR, &efi.flags);
 
 unmap:
        early_memunmap(tbl, sizeof(*tbl));
@@ -174,8 +175,11 @@ int __init efi_memattr_apply_permissions(struct mm_struct *mm,
                                md.phys_addr + size - 1,
                                efi_md_typeattr_format(buf, sizeof(buf), &md));
 
-               if (valid)
+               if (valid) {
                        ret = fn(mm, &md);
+                       if (ret)
+                               pr_err("Error updating mappings, skipping subsequent md's\n");
+               }
        }
        memunmap(tbl);
        return ret;
index 86bf3b84ada56d42758c2b3c57db91351b9f811b..a07ae9e37930767643302ccbec4a7284275a0f25 100644 (file)
@@ -1723,7 +1723,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
 }
 
 /**
- * _gpiochip_irqchip_add() - adds an irqchip to a gpiochip
+ * gpiochip_irqchip_add_key() - adds an irqchip to a gpiochip
  * @gpiochip: the gpiochip to add the irqchip to
  * @irqchip: the irqchip to add to the gpiochip
  * @first_irq: if not dynamically assigned, the base (first) IRQ to
@@ -1749,13 +1749,13 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
  * the pins on the gpiochip can generate a unique IRQ. Everything else
  * need to be open coded.
  */
-int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
-                         struct irq_chip *irqchip,
-                         unsigned int first_irq,
-                         irq_flow_handler_t handler,
-                         unsigned int type,
-                         bool nested,
-                         struct lock_class_key *lock_key)
+int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
+                            struct irq_chip *irqchip,
+                            unsigned int first_irq,
+                            irq_flow_handler_t handler,
+                            unsigned int type,
+                            bool nested,
+                            struct lock_class_key *lock_key)
 {
        struct device_node *of_node;
        bool irq_base_set = false;
@@ -1840,7 +1840,7 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(_gpiochip_irqchip_add);
+EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key);
 
 #else /* CONFIG_GPIOLIB_IRQCHIP */
 
index 29d6d84d1c28b1e847715c08b6fac8f9d2a9aac3..41e41f90265df07af3299f65022feaf731e99ce7 100644 (file)
@@ -83,6 +83,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
                }
                break;
        }
+
+       if (!(*out_ring && (*out_ring)->adev)) {
+               DRM_ERROR("Ring %d is not initialized on IP %d\n",
+                         ring, ip_type);
+               return -EINVAL;
+       }
+
        return 0;
 }
 
index 9999dc71b998599f909464c32e98ca8cc1be5ddb..ccb5e02e7b20ffb3f8cb1776ea6a67a1b879cb33 100644 (file)
@@ -2512,6 +2512,8 @@ static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
 
        WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
        WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+       WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+              ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
 
        return 0;
 }
@@ -2537,7 +2539,6 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
                                      int32_t hot_y)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
        struct drm_gem_object *obj;
        struct amdgpu_bo *aobj;
        int ret;
@@ -2578,7 +2579,9 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
        dce_v10_0_lock_cursor(crtc, true);
 
-       if (hot_x != amdgpu_crtc->cursor_hot_x ||
+       if (width != amdgpu_crtc->cursor_width ||
+           height != amdgpu_crtc->cursor_height ||
+           hot_x != amdgpu_crtc->cursor_hot_x ||
            hot_y != amdgpu_crtc->cursor_hot_y) {
                int x, y;
 
@@ -2587,16 +2590,10 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
                dce_v10_0_cursor_move_locked(crtc, x, y);
 
-               amdgpu_crtc->cursor_hot_x = hot_x;
-               amdgpu_crtc->cursor_hot_y = hot_y;
-       }
-
-       if (width != amdgpu_crtc->cursor_width ||
-           height != amdgpu_crtc->cursor_height) {
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (width - 1) << 16 | (height - 1));
                amdgpu_crtc->cursor_width = width;
                amdgpu_crtc->cursor_height = height;
+               amdgpu_crtc->cursor_hot_x = hot_x;
+               amdgpu_crtc->cursor_hot_y = hot_y;
        }
 
        dce_v10_0_show_cursor(crtc);
@@ -2620,7 +2617,6 @@ unpin:
 static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
 
        if (amdgpu_crtc->cursor_bo) {
                dce_v10_0_lock_cursor(crtc, true);
@@ -2628,10 +2624,6 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
                dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
                                             amdgpu_crtc->cursor_y);
 
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (amdgpu_crtc->cursor_width - 1) << 16 |
-                      (amdgpu_crtc->cursor_height - 1));
-
                dce_v10_0_show_cursor(crtc);
 
                dce_v10_0_lock_cursor(crtc, false);
index 2006abbbfb6216d34c3fb3e18573e17f37561f18..a7af5b33a5e30e1279619c096295e12e415acafe 100644 (file)
@@ -2532,6 +2532,8 @@ static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
 
        WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
        WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+       WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+              ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
 
        return 0;
 }
@@ -2557,7 +2559,6 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
                                      int32_t hot_y)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
        struct drm_gem_object *obj;
        struct amdgpu_bo *aobj;
        int ret;
@@ -2598,7 +2599,9 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
        dce_v11_0_lock_cursor(crtc, true);
 
-       if (hot_x != amdgpu_crtc->cursor_hot_x ||
+       if (width != amdgpu_crtc->cursor_width ||
+           height != amdgpu_crtc->cursor_height ||
+           hot_x != amdgpu_crtc->cursor_hot_x ||
            hot_y != amdgpu_crtc->cursor_hot_y) {
                int x, y;
 
@@ -2607,16 +2610,10 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
                dce_v11_0_cursor_move_locked(crtc, x, y);
 
-               amdgpu_crtc->cursor_hot_x = hot_x;
-               amdgpu_crtc->cursor_hot_y = hot_y;
-       }
-
-       if (width != amdgpu_crtc->cursor_width ||
-           height != amdgpu_crtc->cursor_height) {
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (width - 1) << 16 | (height - 1));
                amdgpu_crtc->cursor_width = width;
                amdgpu_crtc->cursor_height = height;
+               amdgpu_crtc->cursor_hot_x = hot_x;
+               amdgpu_crtc->cursor_hot_y = hot_y;
        }
 
        dce_v11_0_show_cursor(crtc);
@@ -2640,7 +2637,6 @@ unpin:
 static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
 
        if (amdgpu_crtc->cursor_bo) {
                dce_v11_0_lock_cursor(crtc, true);
@@ -2648,10 +2644,6 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
                dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
                                             amdgpu_crtc->cursor_y);
 
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (amdgpu_crtc->cursor_width - 1) << 16 |
-                      (amdgpu_crtc->cursor_height - 1));
-
                dce_v11_0_show_cursor(crtc);
 
                dce_v11_0_lock_cursor(crtc, false);
index b4e4ec630e8cfd5d38d55b61199f3cc60269e0c2..39df6a50637f5a9de0d98099c12e78188dc3ef4a 100644 (file)
@@ -1859,6 +1859,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
        struct amdgpu_device *adev = crtc->dev->dev_private;
        int xorigin = 0, yorigin = 0;
 
+       int w = amdgpu_crtc->cursor_width;
+
        amdgpu_crtc->cursor_x = x;
        amdgpu_crtc->cursor_y = y;
 
@@ -1878,6 +1880,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
 
        WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
        WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+       WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+              ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
 
        return 0;
 }
@@ -1903,7 +1907,6 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
                                     int32_t hot_y)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
        struct drm_gem_object *obj;
        struct amdgpu_bo *aobj;
        int ret;
@@ -1944,7 +1947,9 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
        dce_v6_0_lock_cursor(crtc, true);
 
-       if (hot_x != amdgpu_crtc->cursor_hot_x ||
+       if (width != amdgpu_crtc->cursor_width ||
+           height != amdgpu_crtc->cursor_height ||
+           hot_x != amdgpu_crtc->cursor_hot_x ||
            hot_y != amdgpu_crtc->cursor_hot_y) {
                int x, y;
 
@@ -1953,16 +1958,10 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
                dce_v6_0_cursor_move_locked(crtc, x, y);
 
-               amdgpu_crtc->cursor_hot_x = hot_x;
-               amdgpu_crtc->cursor_hot_y = hot_y;
-       }
-
-       if (width != amdgpu_crtc->cursor_width ||
-           height != amdgpu_crtc->cursor_height) {
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (width - 1) << 16 | (height - 1));
                amdgpu_crtc->cursor_width = width;
                amdgpu_crtc->cursor_height = height;
+               amdgpu_crtc->cursor_hot_x = hot_x;
+               amdgpu_crtc->cursor_hot_y = hot_y;
        }
 
        dce_v6_0_show_cursor(crtc);
@@ -1986,7 +1985,6 @@ unpin:
 static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
 
        if (amdgpu_crtc->cursor_bo) {
                dce_v6_0_lock_cursor(crtc, true);
@@ -1994,10 +1992,6 @@ static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
                dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
                                            amdgpu_crtc->cursor_y);
 
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (amdgpu_crtc->cursor_width - 1) << 16 |
-                      (amdgpu_crtc->cursor_height - 1));
-
                dce_v6_0_show_cursor(crtc);
                dce_v6_0_lock_cursor(crtc, false);
        }
index 584abe834a3ce4658de0e54f6aae416729ad868f..28102bb1704d0b618318a9bcf4b9668bce899150 100644 (file)
@@ -2363,6 +2363,8 @@ static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
 
        WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
        WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+       WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+              ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
 
        return 0;
 }
@@ -2388,7 +2390,6 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
                                     int32_t hot_y)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
        struct drm_gem_object *obj;
        struct amdgpu_bo *aobj;
        int ret;
@@ -2429,7 +2430,9 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
        dce_v8_0_lock_cursor(crtc, true);
 
-       if (hot_x != amdgpu_crtc->cursor_hot_x ||
+       if (width != amdgpu_crtc->cursor_width ||
+           height != amdgpu_crtc->cursor_height ||
+           hot_x != amdgpu_crtc->cursor_hot_x ||
            hot_y != amdgpu_crtc->cursor_hot_y) {
                int x, y;
 
@@ -2438,16 +2441,10 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
                dce_v8_0_cursor_move_locked(crtc, x, y);
 
-               amdgpu_crtc->cursor_hot_x = hot_x;
-               amdgpu_crtc->cursor_hot_y = hot_y;
-       }
-
-       if (width != amdgpu_crtc->cursor_width ||
-           height != amdgpu_crtc->cursor_height) {
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (width - 1) << 16 | (height - 1));
                amdgpu_crtc->cursor_width = width;
                amdgpu_crtc->cursor_height = height;
+               amdgpu_crtc->cursor_hot_x = hot_x;
+               amdgpu_crtc->cursor_hot_y = hot_y;
        }
 
        dce_v8_0_show_cursor(crtc);
@@ -2471,7 +2468,6 @@ unpin:
 static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
 
        if (amdgpu_crtc->cursor_bo) {
                dce_v8_0_lock_cursor(crtc, true);
@@ -2479,10 +2475,6 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
                dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
                                            amdgpu_crtc->cursor_y);
 
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (amdgpu_crtc->cursor_width - 1) << 16 |
-                      (amdgpu_crtc->cursor_height - 1));
-
                dce_v8_0_show_cursor(crtc);
 
                dce_v8_0_lock_cursor(crtc, false);
index 762f8e82ceb7465f56aa8cfcd7124ce0fc28acda..e9a176891e13319d77e10b643a44b9e893e562ac 100644 (file)
@@ -627,11 +627,8 @@ static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs =
 
 static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
 {
-       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
-
-       kfree(amdgpu_encoder->enc_priv);
        drm_encoder_cleanup(encoder);
-       kfree(amdgpu_encoder);
+       kfree(encoder);
 }
 
 static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
index 45a573e63d4ae9778cff0b022ef53e22163e9a7b..0635829b18cf3aed41239079e4208336b00cda0f 100644 (file)
@@ -44,6 +44,7 @@ MODULE_FIRMWARE("radeon/tahiti_mc.bin");
 MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
 MODULE_FIRMWARE("radeon/verde_mc.bin");
 MODULE_FIRMWARE("radeon/oland_mc.bin");
+MODULE_FIRMWARE("radeon/si58_mc.bin");
 
 #define MC_SEQ_MISC0__MT__MASK   0xf0000000
 #define MC_SEQ_MISC0__MT__GDDR1  0x10000000
@@ -113,6 +114,7 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
        const char *chip_name;
        char fw_name[30];
        int err;
+       bool is_58_fw = false;
 
        DRM_DEBUG("\n");
 
@@ -135,7 +137,14 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
        default: BUG();
        }
 
-       snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+       /* this memory configuration requires special firmware */
+       if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
+               is_58_fw = true;
+
+       if (is_58_fw)
+               snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
+       else
+               snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
        err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
        if (err)
                goto out;
@@ -245,6 +254,9 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
        }
        WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
 
+       if (adev->mode_info.num_crtc)
+               amdgpu_display_set_vga_render_state(adev, false);
+
        gmc_v6_0_mc_stop(adev, &save);
 
        if (gmc_v6_0_wait_for_idle((void *)adev)) {
@@ -274,7 +286,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
                dev_warn(adev->dev, "Wait for MC idle timedout !\n");
        }
        gmc_v6_0_mc_resume(adev, &save);
-       amdgpu_display_set_vga_render_state(adev, false);
 }
 
 static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
@@ -463,19 +474,11 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
        WREG32(mmVM_CONTEXT1_CNTL,
               VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
               (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
-              ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT) |
-              VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-              VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
-              VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-              VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
-              VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-              VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
-              VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-              VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
-              VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-              VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
-              VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-              VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);
+              ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
+       if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
+               gmc_v6_0_set_fault_enable_default(adev, false);
+       else
+               gmc_v6_0_set_fault_enable_default(adev, true);
 
        gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
        dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -754,7 +757,10 @@ static int gmc_v6_0_late_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+       if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
+               return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+       else
+               return 0;
 }
 
 static int gmc_v6_0_sw_init(void *handle)
index 10bedfac27b8118dae6735a65ecebcbdd4109d2b..6e150db8f380417870cea60283ecacaf1c14fa7d 100644 (file)
@@ -64,6 +64,7 @@ MODULE_FIRMWARE("radeon/oland_smc.bin");
 MODULE_FIRMWARE("radeon/oland_k_smc.bin");
 MODULE_FIRMWARE("radeon/hainan_smc.bin");
 MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
+MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
 
 union power_info {
        struct _ATOM_POWERPLAY_INFO info;
@@ -3487,17 +3488,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
                    (adev->pdev->device == 0x6817) ||
                    (adev->pdev->device == 0x6806))
                        max_mclk = 120000;
-       } else if (adev->asic_type == CHIP_OLAND) {
-               if ((adev->pdev->revision == 0xC7) ||
-                   (adev->pdev->revision == 0x80) ||
-                   (adev->pdev->revision == 0x81) ||
-                   (adev->pdev->revision == 0x83) ||
-                   (adev->pdev->revision == 0x87) ||
-                   (adev->pdev->device == 0x6604) ||
-                   (adev->pdev->device == 0x6605)) {
-                       max_sclk = 75000;
-                       max_mclk = 80000;
-               }
        } else if (adev->asic_type == CHIP_HAINAN) {
                if ((adev->pdev->revision == 0x81) ||
                    (adev->pdev->revision == 0x83) ||
@@ -3506,7 +3496,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
                    (adev->pdev->device == 0x6665) ||
                    (adev->pdev->device == 0x6667)) {
                        max_sclk = 75000;
-                       max_mclk = 80000;
                }
        }
        /* Apply dpm quirks */
@@ -7713,10 +7702,11 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
                        ((adev->pdev->device == 0x6660) ||
                        (adev->pdev->device == 0x6663) ||
                        (adev->pdev->device == 0x6665) ||
-                       (adev->pdev->device == 0x6667))) ||
-                   ((adev->pdev->revision == 0xc3) &&
-                       (adev->pdev->device == 0x6665)))
+                        (adev->pdev->device == 0x6667))))
                        chip_name = "hainan_k";
+               else if ((adev->pdev->revision == 0xc3) &&
+                        (adev->pdev->device == 0x6665))
+                       chip_name = "banks_k_2";
                else
                        chip_name = "hainan";
                break;
index 96444e4d862af3f011c3b3de8481aafe39529414..7fb9137dd89b1c2bc064c3ea516c243721b5dafe 100644 (file)
 #include "smu/smu_7_0_1_sh_mask.h"
 
 static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
-static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
 static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
 static int uvd_v4_2_start(struct amdgpu_device *adev);
 static void uvd_v4_2_stop(struct amdgpu_device *adev);
 static int uvd_v4_2_set_clockgating_state(void *handle,
                                enum amd_clockgating_state state);
+static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
+                            bool sw_mode);
 /**
  * uvd_v4_2_ring_get_rptr - get read pointer
  *
@@ -140,7 +141,8 @@ static int uvd_v4_2_sw_fini(void *handle)
 
        return r;
 }
-
+static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
+                                bool enable);
 /**
  * uvd_v4_2_hw_init - start and test UVD block
  *
@@ -155,8 +157,7 @@ static int uvd_v4_2_hw_init(void *handle)
        uint32_t tmp;
        int r;
 
-       uvd_v4_2_init_cg(adev);
-       uvd_v4_2_set_clockgating_state(adev, AMD_CG_STATE_GATE);
+       uvd_v4_2_enable_mgcg(adev, true);
        amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
        r = uvd_v4_2_start(adev);
        if (r)
@@ -266,11 +267,13 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
        struct amdgpu_ring *ring = &adev->uvd.ring;
        uint32_t rb_bufsz;
        int i, j, r;
-
        /* disable byte swapping */
        u32 lmi_swap_cntl = 0;
        u32 mp_swap_cntl = 0;
 
+       WREG32(mmUVD_CGC_GATE, 0);
+       uvd_v4_2_set_dcm(adev, true);
+
        uvd_v4_2_mc_resume(adev);
 
        /* disable interupt */
@@ -406,6 +409,8 @@ static void uvd_v4_2_stop(struct amdgpu_device *adev)
 
        /* Unstall UMC and register bus */
        WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+
+       uvd_v4_2_set_dcm(adev, false);
 }
 
 /**
@@ -619,19 +624,6 @@ static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
        WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
 }
 
-static void uvd_v4_2_init_cg(struct amdgpu_device *adev)
-{
-       bool hw_mode = true;
-
-       if (hw_mode) {
-               uvd_v4_2_set_dcm(adev, false);
-       } else {
-               u32 tmp = RREG32(mmUVD_CGC_CTRL);
-               tmp &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
-               WREG32(mmUVD_CGC_CTRL, tmp);
-       }
-}
-
 static bool uvd_v4_2_is_idle(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -685,17 +677,6 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
 static int uvd_v4_2_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
 {
-       bool gate = false;
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
-               return 0;
-
-       if (state == AMD_CG_STATE_GATE)
-               gate = true;
-
-       uvd_v4_2_enable_mgcg(adev, gate);
-
        return 0;
 }
 
@@ -711,9 +692,6 @@ static int uvd_v4_2_set_powergating_state(void *handle,
         */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
-               return 0;
-
        if (state == AMD_PG_STATE_GATE) {
                uvd_v4_2_stop(adev);
                return 0;
index 5fb0b7f5c065121218ea4befe1a9da8def2e3035..37ca685e5a9a9e358eaab6d32b9d5758fec90565 100644 (file)
 
 #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT    0x04
 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK      0x10
+#define GRBM_GFX_INDEX__VCE_ALL_PIPE           0x07
+
 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0        0x8616
 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1        0x8617
 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2        0x8618
+#define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000
+
 #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK  0x02
 
 #define VCE_V3_0_FW_SIZE       (384 * 1024)
@@ -54,6 +58,9 @@
 
 #define FW_52_8_3      ((52 << 24) | (8 << 16) | (3 << 8))
 
+#define GET_VCE_INSTANCE(i)  ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
+                                       | GRBM_GFX_INDEX__VCE_ALL_PIPE)
+
 static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
 static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -175,7 +182,7 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
                WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
 
                data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
-               data &= ~0xffc00000;
+               data &= ~0x3ff;
                WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
 
                data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
@@ -249,7 +256,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
                if (adev->vce.harvest_config & (1 << idx))
                        continue;
 
-               WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
                vce_v3_0_mc_resume(adev, idx);
                WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
 
@@ -273,7 +280,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
                }
        }
 
-       WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+       WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        return 0;
@@ -288,7 +295,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
                if (adev->vce.harvest_config & (1 << idx))
                        continue;
 
-               WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
 
                if (adev->asic_type >= CHIP_STONEY)
                        WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
@@ -306,7 +313,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
                        vce_v3_0_set_vce_sw_clock_gating(adev, false);
        }
 
-       WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+       WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        return 0;
@@ -586,17 +593,17 @@ static bool vce_v3_0_check_soft_reset(void *handle)
         * VCE team suggest use bit 3--bit 6 for busy status check
         */
        mutex_lock(&adev->grbm_idx_mutex);
-       WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+       WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
        if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
        }
-       WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
+       WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
        if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
        }
-       WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+       WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
        mutex_unlock(&adev->grbm_idx_mutex);
 
        if (srbm_soft_reset) {
@@ -734,7 +741,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
                if (adev->vce.harvest_config & (1 << i))
                        continue;
 
-               WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i));
 
                if (enable) {
                        /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
@@ -753,7 +760,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
                vce_v3_0_set_vce_sw_clock_gating(adev, enable);
        }
 
-       WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+       WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        return 0;
index b0c63c5f54c9ca95bc29eb069191455d1b56c0cd..6bb79c94cb9ffb5d7bec4fb104f9e32b7c3b3046 100644 (file)
@@ -200,7 +200,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
                                cgs_set_clockgating_state(
                                                        hwmgr->device,
                                                        AMD_IP_BLOCK_TYPE_VCE,
-                                                       AMD_CG_STATE_UNGATE);
+                                                       AMD_CG_STATE_GATE);
                                cgs_set_powergating_state(
                                                        hwmgr->device,
                                                        AMD_IP_BLOCK_TYPE_VCE,
@@ -218,7 +218,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
                                cgs_set_clockgating_state(
                                                        hwmgr->device,
                                                        AMD_IP_BLOCK_TYPE_VCE,
-                                                       AMD_PG_STATE_GATE);
+                                                       AMD_PG_STATE_UNGATE);
                                cz_dpm_update_vce_dpm(hwmgr);
                                cz_enable_disable_vce_dpm(hwmgr, true);
                                return 0;
index 4b14f259a147039e8e0eacc92da77f6266a2571e..0fb4e8c8f5e13866120de7325dd801c59f1d940f 100644 (file)
@@ -1402,14 +1402,22 @@ int  cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
                                             cz_hwmgr->vce_dpm.hard_min_clk,
                                                PPSMC_MSG_SetEclkHardMin));
        } else {
-               /*EPR# 419220 -HW limitation to to */
-               cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
-               smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                           PPSMC_MSG_SetEclkHardMin,
-                                           cz_get_eclk_level(hwmgr,
-                                    cz_hwmgr->vce_dpm.hard_min_clk,
-                                         PPSMC_MSG_SetEclkHardMin));
-
+               /*Program HardMin based on the vce_arbiter.ecclk */
+               if (hwmgr->vce_arbiter.ecclk == 0) {
+                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                                           PPSMC_MSG_SetEclkHardMin, 0);
+               /* disable ECLK DPM 0. Otherwise VCE could hang if
+                * switching SCLK from DPM 0 to 6/7 */
+                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                                       PPSMC_MSG_SetEclkSoftMin, 1);
+               } else {
+                       cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
+                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                                               PPSMC_MSG_SetEclkHardMin,
+                                               cz_get_eclk_level(hwmgr,
+                                               cz_hwmgr->vce_dpm.hard_min_clk,
+                                               PPSMC_MSG_SetEclkHardMin));
+               }
        }
        return 0;
 }
index 908011d2c8f5200e92cc8db772b85315d5c16343..7abda94fc2cf3bb43ec686ac4e699a43b77a11d0 100644 (file)
@@ -113,6 +113,7 @@ struct ast_private {
        struct ttm_bo_kmap_obj cache_kmap;
        int next_cursor;
        bool support_wide_screen;
+       bool DisableP2A;
 
        enum ast_tx_chip tx_chip_type;
        u8 dp501_maxclk;
index f75c6421db6239c9435ed39dc7d6244d13894920..533e762d036dc272afbdf4d2bce146b6f1b450d9 100644 (file)
@@ -124,6 +124,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
        } else
                *need_post = false;
 
+       /* Check P2A Access */
+       ast->DisableP2A = true;
+       data = ast_read32(ast, 0xf004);
+       if (data != 0xFFFFFFFF)
+               ast->DisableP2A = false;
+
        /* Check if we support wide screen */
        switch (ast->chip) {
        case AST1180:
@@ -140,15 +146,17 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
                        ast->support_wide_screen = true;
                else {
                        ast->support_wide_screen = false;
-                       /* Read SCU7c (silicon revision register) */
-                       ast_write32(ast, 0xf004, 0x1e6e0000);
-                       ast_write32(ast, 0xf000, 0x1);
-                       data = ast_read32(ast, 0x1207c);
-                       data &= 0x300;
-                       if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
-                               ast->support_wide_screen = true;
-                       if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
-                               ast->support_wide_screen = true;
+                       if (ast->DisableP2A == false) {
+                               /* Read SCU7c (silicon revision register) */
+                               ast_write32(ast, 0xf004, 0x1e6e0000);
+                               ast_write32(ast, 0xf000, 0x1);
+                               data = ast_read32(ast, 0x1207c);
+                               data &= 0x300;
+                               if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
+                                       ast->support_wide_screen = true;
+                               if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
+                                       ast->support_wide_screen = true;
+                       }
                }
                break;
        }
@@ -216,80 +224,81 @@ static int ast_get_dram_info(struct drm_device *dev)
        uint32_t data, data2;
        uint32_t denum, num, div, ref_pll;
 
-       ast_write32(ast, 0xf004, 0x1e6e0000);
-       ast_write32(ast, 0xf000, 0x1);
-
-
-       ast_write32(ast, 0x10000, 0xfc600309);
-
-       do {
-               if (pci_channel_offline(dev->pdev))
-                       return -EIO;
-       } while (ast_read32(ast, 0x10000) != 0x01);
-       data = ast_read32(ast, 0x10004);
-
-       if (data & 0x40)
+       if (ast->DisableP2A)
+       {
                ast->dram_bus_width = 16;
+               ast->dram_type = AST_DRAM_1Gx16;
+               ast->mclk = 396;
+       }
        else
-               ast->dram_bus_width = 32;
+       {
+               ast_write32(ast, 0xf004, 0x1e6e0000);
+               ast_write32(ast, 0xf000, 0x1);
+               data = ast_read32(ast, 0x10004);
+
+               if (data & 0x40)
+                       ast->dram_bus_width = 16;
+               else
+                       ast->dram_bus_width = 32;
+
+               if (ast->chip == AST2300 || ast->chip == AST2400) {
+                       switch (data & 0x03) {
+                       case 0:
+                               ast->dram_type = AST_DRAM_512Mx16;
+                               break;
+                       default:
+                       case 1:
+                               ast->dram_type = AST_DRAM_1Gx16;
+                               break;
+                       case 2:
+                               ast->dram_type = AST_DRAM_2Gx16;
+                               break;
+                       case 3:
+                               ast->dram_type = AST_DRAM_4Gx16;
+                               break;
+                       }
+               } else {
+                       switch (data & 0x0c) {
+                       case 0:
+                       case 4:
+                               ast->dram_type = AST_DRAM_512Mx16;
+                               break;
+                       case 8:
+                               if (data & 0x40)
+                                       ast->dram_type = AST_DRAM_1Gx16;
+                               else
+                                       ast->dram_type = AST_DRAM_512Mx32;
+                               break;
+                       case 0xc:
+                               ast->dram_type = AST_DRAM_1Gx32;
+                               break;
+                       }
+               }
 
-       if (ast->chip == AST2300 || ast->chip == AST2400) {
-               switch (data & 0x03) {
-               case 0:
-                       ast->dram_type = AST_DRAM_512Mx16;
-                       break;
-               default:
-               case 1:
-                       ast->dram_type = AST_DRAM_1Gx16;
-                       break;
-               case 2:
-                       ast->dram_type = AST_DRAM_2Gx16;
-                       break;
+               data = ast_read32(ast, 0x10120);
+               data2 = ast_read32(ast, 0x10170);
+               if (data2 & 0x2000)
+                       ref_pll = 14318;
+               else
+                       ref_pll = 12000;
+
+               denum = data & 0x1f;
+               num = (data & 0x3fe0) >> 5;
+               data = (data & 0xc000) >> 14;
+               switch (data) {
                case 3:
-                       ast->dram_type = AST_DRAM_4Gx16;
-                       break;
-               }
-       } else {
-               switch (data & 0x0c) {
-               case 0:
-               case 4:
-                       ast->dram_type = AST_DRAM_512Mx16;
+                       div = 0x4;
                        break;
-               case 8:
-                       if (data & 0x40)
-                               ast->dram_type = AST_DRAM_1Gx16;
-                       else
-                               ast->dram_type = AST_DRAM_512Mx32;
+               case 2:
+               case 1:
+                       div = 0x2;
                        break;
-               case 0xc:
-                       ast->dram_type = AST_DRAM_1Gx32;
+               default:
+                       div = 0x1;
                        break;
                }
+               ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
        }
-
-       data = ast_read32(ast, 0x10120);
-       data2 = ast_read32(ast, 0x10170);
-       if (data2 & 0x2000)
-               ref_pll = 14318;
-       else
-               ref_pll = 12000;
-
-       denum = data & 0x1f;
-       num = (data & 0x3fe0) >> 5;
-       data = (data & 0xc000) >> 14;
-       switch (data) {
-       case 3:
-               div = 0x4;
-               break;
-       case 2:
-       case 1:
-               div = 0x2;
-               break;
-       default:
-               div = 0x1;
-               break;
-       }
-       ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
        return 0;
 }
 
index 810c51d92b99f81a441f37d02e319ed25b7768e5..5331ee1df086e7ae3950e33efc38975b544abdd4 100644 (file)
@@ -379,12 +379,20 @@ void ast_post_gpu(struct drm_device *dev)
        ast_open_key(ast);
        ast_set_def_ext_reg(dev);
 
-       if (ast->chip == AST2300 || ast->chip == AST2400)
-               ast_init_dram_2300(dev);
-       else
-               ast_init_dram_reg(dev);
+       if (ast->DisableP2A == false)
+       {
+               if (ast->chip == AST2300 || ast->chip == AST2400)
+                       ast_init_dram_2300(dev);
+               else
+                       ast_init_dram_reg(dev);
 
-       ast_init_3rdtx(dev);
+               ast_init_3rdtx(dev);
+       }
+       else
+       {
+               if (ast->tx_chip_type != AST_TX_NONE)
+                       ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80);        /* Enable DVO */
+       }
 }
 
 /* AST 2300 DRAM settings */
index eb9bf8786c249744418b601bae38bb78ae04f2a7..18eefdcbf1ba9f39bb225b371ac9b822bb277485 100644 (file)
@@ -1382,6 +1382,7 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
 
        pm_runtime_enable(dev);
 
+       pm_runtime_get_sync(dev);
        phy_power_on(dp->phy);
 
        analogix_dp_init_dp(dp);
@@ -1414,9 +1415,15 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
                goto err_disable_pm_runtime;
        }
 
+       phy_power_off(dp->phy);
+       pm_runtime_put(dev);
+
        return 0;
 
 err_disable_pm_runtime:
+
+       phy_power_off(dp->phy);
+       pm_runtime_put(dev);
        pm_runtime_disable(dev);
 
        return ret;
index 04b3c161dfae6fc9c643229f2a312eb44abca62a..7f4cc6e172abaeab8b4007497daf309058c5e554 100644 (file)
@@ -7,3 +7,12 @@ config DRM_CIRRUS_QEMU
         This is a KMS driver for emulated cirrus device in qemu.
         It is *NOT* intended for real cirrus devices. This requires
         the modesetting userspace X.org driver.
+
+        Cirrus is obsolete, the hardware was designed in the '90s
+        and can't keep up with today's needs.  More background:
+        https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
+
+        Better alternatives are:
+          - stdvga (DRM_BOCHS, qemu -vga std, default in qemu 2.2+)
+          - qxl (DRM_QXL, qemu -vga qxl, works best with spice)
+          - virtio (DRM_VIRTIO_GPU, qemu -vga virtio)
index 60697482b94c8136ea2720dbf3b9f81c94e1d823..fdfb1ec17e660efa0b1f2c1f7273fa8d6fd8567a 100644 (file)
@@ -291,15 +291,15 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
 EXPORT_SYMBOL(drm_atomic_get_crtc_state);
 
 static void set_out_fence_for_crtc(struct drm_atomic_state *state,
-                                  struct drm_crtc *crtc, s64 __user *fence_ptr)
+                                  struct drm_crtc *crtc, s32 __user *fence_ptr)
 {
        state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
 }
 
-static s64 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
+static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
                                          struct drm_crtc *crtc)
 {
-       s64 __user *fence_ptr;
+       s32 __user *fence_ptr;
 
        fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
        state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
@@ -512,7 +512,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
                state->color_mgmt_changed |= replaced;
                return ret;
        } else if (property == config->prop_out_fence_ptr) {
-               s64 __user *fence_ptr = u64_to_user_ptr(val);
+               s32 __user *fence_ptr = u64_to_user_ptr(val);
 
                if (!fence_ptr)
                        return 0;
@@ -1915,7 +1915,7 @@ EXPORT_SYMBOL(drm_atomic_clean_old_fb);
  */
 
 struct drm_out_fence_state {
-       s64 __user *out_fence_ptr;
+       s32 __user *out_fence_ptr;
        struct sync_file *sync_file;
        int fd;
 };
@@ -1952,7 +1952,7 @@ static int prepare_crtc_signaling(struct drm_device *dev,
                return 0;
 
        for_each_crtc_in_state(state, crtc, crtc_state, i) {
-               u64 __user *fence_ptr;
+               s32 __user *fence_ptr;
 
                fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
 
@@ -2032,13 +2032,16 @@ static void complete_crtc_signaling(struct drm_device *dev,
        }
 
        for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               struct drm_pending_vblank_event *event = crtc_state->event;
                /*
-                * TEST_ONLY and PAGE_FLIP_EVENT are mutually
-                * exclusive, if they weren't, this code should be
-                * called on success for TEST_ONLY too.
+                * Free the allocated event. drm_atomic_helper_setup_commit
+                * can allocate an event too, so only free it if it's ours
+                * to prevent a double free in drm_atomic_state_clear.
                 */
-               if (crtc_state->event)
-                       drm_event_cancel_free(dev, &crtc_state->event->base);
+               if (event && (event->base.fence || event->base.file_priv)) {
+                       drm_event_cancel_free(dev, &event->base);
+                       crtc_state->event = NULL;
+               }
        }
 
        if (!fence_state)
index 34f757bcabae8d88382f18392467e67f4e0f6100..4594477dee00bc0ffb7847da556985848df717e7 100644 (file)
@@ -1666,9 +1666,6 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
 
                funcs = plane->helper_private;
 
-               if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
-                       continue;
-
                if (funcs->prepare_fb) {
                        ret = funcs->prepare_fb(plane, plane_state);
                        if (ret)
@@ -1685,9 +1682,6 @@ fail:
                if (j >= i)
                        continue;
 
-               if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
-                       continue;
-
                funcs = plane->helper_private;
 
                if (funcs->cleanup_fb)
@@ -1954,9 +1948,6 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
        for_each_plane_in_state(old_state, plane, plane_state, i) {
                const struct drm_plane_helper_funcs *funcs;
 
-               if (!drm_atomic_helper_framebuffer_changed(dev, old_state, plane_state->crtc))
-                       continue;
-
                funcs = plane->helper_private;
 
                if (funcs->cleanup_fb)
index 5a452628939272969c54239498b9a1ca86cd4a67..7a7019ac93884eeeba046ad62b3c81037796beb7 100644 (file)
@@ -225,6 +225,7 @@ int drm_connector_init(struct drm_device *dev,
 
        INIT_LIST_HEAD(&connector->probed_modes);
        INIT_LIST_HEAD(&connector->modes);
+       mutex_init(&connector->mutex);
        connector->edid_blob_ptr = NULL;
        connector->status = connector_status_unknown;
 
@@ -359,6 +360,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
                connector->funcs->atomic_destroy_state(connector,
                                                       connector->state);
 
+       mutex_destroy(&connector->mutex);
+
        memset(connector, 0, sizeof(*connector));
 }
 EXPORT_SYMBOL(drm_connector_cleanup);
@@ -374,14 +377,18 @@ EXPORT_SYMBOL(drm_connector_cleanup);
  */
 int drm_connector_register(struct drm_connector *connector)
 {
-       int ret;
+       int ret = 0;
 
-       if (connector->registered)
+       if (!connector->dev->registered)
                return 0;
 
+       mutex_lock(&connector->mutex);
+       if (connector->registered)
+               goto unlock;
+
        ret = drm_sysfs_connector_add(connector);
        if (ret)
-               return ret;
+               goto unlock;
 
        ret = drm_debugfs_connector_add(connector);
        if (ret) {
@@ -397,12 +404,14 @@ int drm_connector_register(struct drm_connector *connector)
        drm_mode_object_register(connector->dev, &connector->base);
 
        connector->registered = true;
-       return 0;
+       goto unlock;
 
 err_debugfs:
        drm_debugfs_connector_remove(connector);
 err_sysfs:
        drm_sysfs_connector_remove(connector);
+unlock:
+       mutex_unlock(&connector->mutex);
        return ret;
 }
 EXPORT_SYMBOL(drm_connector_register);
@@ -415,8 +424,11 @@ EXPORT_SYMBOL(drm_connector_register);
  */
 void drm_connector_unregister(struct drm_connector *connector)
 {
-       if (!connector->registered)
+       mutex_lock(&connector->mutex);
+       if (!connector->registered) {
+               mutex_unlock(&connector->mutex);
                return;
+       }
 
        if (connector->funcs->early_unregister)
                connector->funcs->early_unregister(connector);
@@ -425,6 +437,7 @@ void drm_connector_unregister(struct drm_connector *connector)
        drm_debugfs_connector_remove(connector);
 
        connector->registered = false;
+       mutex_unlock(&connector->mutex);
 }
 EXPORT_SYMBOL(drm_connector_unregister);
 
index aa644487749c9cbab104f2ea77c819baf7d6b250..f59771da52eec030270d1d3e2d2038dc64e0d9f1 100644 (file)
@@ -1817,7 +1817,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
                                mgr->payloads[i].vcpi = req_payload.vcpi;
                        } else if (mgr->payloads[i].num_slots) {
                                mgr->payloads[i].num_slots = 0;
-                               drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]);
+                               drm_dp_destroy_payload_step1(mgr, port, mgr->payloads[i].vcpi, &mgr->payloads[i]);
                                req_payload.payload_state = mgr->payloads[i].payload_state;
                                mgr->payloads[i].start_slot = 0;
                        }
index a525751b4559e9f3cc99850d5aac6e5c3eb6f16c..6594b4088f11bc8e5aa6a6c308ba3fdf633921a3 100644 (file)
@@ -745,6 +745,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
        if (ret)
                goto err_minors;
 
+       dev->registered = true;
+
        if (dev->driver->load) {
                ret = dev->driver->load(dev, flags);
                if (ret)
@@ -785,6 +787,8 @@ void drm_dev_unregister(struct drm_device *dev)
 
        drm_lastclose(dev);
 
+       dev->registered = false;
+
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                drm_modeset_unregister_all(dev);
 
index 1d6c335584ec37d1ea399ccb4a07fd6e0120064e..33cd51632721fdb0eb76dc0b59cc10353f3366c4 100644 (file)
@@ -376,7 +376,7 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj,
        off = drm_vma_node_start(&obj->vma_node);
 
        seq_printf(m, "%2d (%2d) %08llx %pad %p %zu",
-                       obj->name, obj->refcount.refcount.counter,
+                       obj->name, kref_read(&obj->refcount),
                        off, &cma_obj->paddr, cma_obj->vaddr, obj->size);
 
        seq_printf(m, "\n");
index ffb2ab389d1d14863ed1e69c17419d560910afe5..6b68e90884360ff3ed724d2b2f7b667338324b00 100644 (file)
@@ -118,7 +118,7 @@ static int drm_gem_one_name_info(int id, void *ptr, void *data)
        seq_printf(m, "%6d %8zd %7d %8d\n",
                   obj->name, obj->size,
                   obj->handle_count,
-                  atomic_read(&obj->refcount.refcount));
+                  kref_read(&obj->refcount));
        return 0;
 }
 
index 9f17085b1fdd5ad6ccdd0266760788d10b1d6697..c6885a4911c093746a8bfbe191356130534378d9 100644 (file)
@@ -159,7 +159,7 @@ EXPORT_SYMBOL(drm_mode_object_find);
 void drm_mode_object_unreference(struct drm_mode_object *obj)
 {
        if (obj->free_cb) {
-               DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, atomic_read(&obj->refcount.refcount));
+               DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, kref_read(&obj->refcount));
                kref_put(&obj->refcount, obj->free_cb);
        }
 }
@@ -176,7 +176,7 @@ EXPORT_SYMBOL(drm_mode_object_unreference);
 void drm_mode_object_reference(struct drm_mode_object *obj)
 {
        if (obj->free_cb) {
-               DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, atomic_read(&obj->refcount.refcount));
+               DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, kref_read(&obj->refcount));
                kref_get(&obj->refcount);
        }
 }
index ac6a35212501eb56784cf14c25867fbfeda88f47..e6b19bc9021ae0634eb524af3e9f30d8b5f445ab 100644 (file)
@@ -1460,6 +1460,13 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
                return NULL;
 
        mode->type |= DRM_MODE_TYPE_USERDEF;
+       /* fix up 1368x768: GTF/CVT can't express 1366 width due to alignment */
+       if (cmd->xres == 1366 && mode->hdisplay == 1368) {
+               mode->hdisplay = 1366;
+               mode->hsync_start--;
+               mode->hsync_end--;
+               drm_mode_set_name(mode);
+       }
        drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
        return mode;
 }
index ac953f037be7efb7edac1c67d43e46b20b3be365..cf8f0128c161ed6e1034322066c49d53a80b9e92 100644 (file)
@@ -143,8 +143,18 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
        }
 
        if (dev->mode_config.delayed_event) {
+               /*
+                * FIXME:
+                *
+                * Use short (1s) delay to handle the initial delayed event.
+                * This delay should not be needed, but Optimus/nouveau will
+                * fail in a mysterious way if the delayed event is handled as
+                * soon as possible like it is done in
+                * drm_helper_probe_single_connector_modes() in case the poll
+                * was enabled before.
+                */
                poll = true;
-               delay = 0;
+               delay = HZ;
        }
 
        if (poll)
index 114dddbd297bacf36406b7a4abdff39e71cdd81b..aa6e35ddc87f5b5596a5f318ad7d2c5d7098449c 100644 (file)
@@ -486,7 +486,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 
        seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
                        etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
-                       obj->name, obj->refcount.refcount.counter,
+                       obj->name, kref_read(&obj->refcount),
                        off, etnaviv_obj->vaddr, obj->size);
 
        rcu_read_lock();
index 169ac96e8f0861f9648e0e3ca3292ca1da61556c..fe0e85b41310a8fa24ba0f6caaa598edc669a99c 100644 (file)
@@ -116,9 +116,14 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
                struct list_head list;
                bool found;
 
+               /*
+                * XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick
+                * drm_mm into giving out a low IOVA after address space
+                * rollover. This needs a proper fix.
+                */
                ret = drm_mm_insert_node_in_range(&mmu->mm, node,
                        size, 0, mmu->last_iova, ~0UL,
-                       DRM_MM_SEARCH_DEFAULT);
+                       mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW);
 
                if (ret != -ENOSPC)
                        break;
index 6ca1f3117fe8d524fbb4dfed89692c527c5bd41c..75eeb831ed6a1d5d00fa6659d967f9b0b2ad34ff 100644 (file)
@@ -46,7 +46,8 @@ enum decon_flag_bits {
        BIT_CLKS_ENABLED,
        BIT_IRQS_ENABLED,
        BIT_WIN_UPDATED,
-       BIT_SUSPENDED
+       BIT_SUSPENDED,
+       BIT_REQUEST_UPDATE
 };
 
 struct decon_context {
@@ -141,12 +142,6 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
                m->crtc_vsync_end = m->crtc_vsync_start + 1;
        }
 
-       decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID, 0);
-
-       /* enable clock gate */
-       val = CMU_CLKGAGE_MODE_SFR_F | CMU_CLKGAGE_MODE_MEM_F;
-       writel(val, ctx->addr + DECON_CMU);
-
        if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG))
                decon_setup_trigger(ctx);
 
@@ -315,6 +310,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
 
        /* window enable */
        decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0);
+       set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
 }
 
 static void decon_disable_plane(struct exynos_drm_crtc *crtc,
@@ -327,6 +323,7 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
                return;
 
        decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0);
+       set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
 }
 
 static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
@@ -340,8 +337,8 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
        for (i = ctx->first_win; i < WINDOWS_NR; i++)
                decon_shadow_protect_win(ctx, i, false);
 
-       /* standalone update */
-       decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
+       if (test_and_clear_bit(BIT_REQUEST_UPDATE, &ctx->flags))
+               decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
 
        if (ctx->out_type & IFTYPE_I80)
                set_bit(BIT_WIN_UPDATED, &ctx->flags);
index 0d41ebc4aea63a6a7e101b5439739a8faad989fd..f7bce8603958da2760080fe2bd2b30ff83553d9d 100644 (file)
 #include "i915_drv.h"
 #include "gvt.h"
 
-#define MB_TO_BYTES(mb) ((mb) << 20ULL)
-#define BYTES_TO_MB(b) ((b) >> 20ULL)
-
-#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
-#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
-#define HOST_FENCE 4
-
 static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
 {
        struct intel_gvt *gvt = vgpu->gvt;
@@ -165,6 +158,14 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
        POSTING_READ(fence_reg_lo);
 }
 
+static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
+{
+       int i;
+
+       for (i = 0; i < vgpu_fence_sz(vgpu); i++)
+               intel_vgpu_write_fence(vgpu, i, 0);
+}
+
 static void free_vgpu_fence(struct intel_vgpu *vgpu)
 {
        struct intel_gvt *gvt = vgpu->gvt;
@@ -178,9 +179,9 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
        intel_runtime_pm_get(dev_priv);
 
        mutex_lock(&dev_priv->drm.struct_mutex);
+       _clear_vgpu_fence(vgpu);
        for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
                reg = vgpu->fence.regs[i];
-               intel_vgpu_write_fence(vgpu, i, 0);
                list_add_tail(&reg->link,
                              &dev_priv->mm.fence_list);
        }
@@ -208,13 +209,14 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
                        continue;
                list_del(pos);
                vgpu->fence.regs[i] = reg;
-               intel_vgpu_write_fence(vgpu, i, 0);
                if (++i == vgpu_fence_sz(vgpu))
                        break;
        }
        if (i != vgpu_fence_sz(vgpu))
                goto out_free_fence;
 
+       _clear_vgpu_fence(vgpu);
+
        mutex_unlock(&dev_priv->drm.struct_mutex);
        intel_runtime_pm_put(dev_priv);
        return 0;
@@ -313,6 +315,22 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
        free_resource(vgpu);
 }
 
+/**
+ * intel_vgpu_reset_resource - reset resource state owned by a vGPU
+ * @vgpu: a vGPU
+ *
+ * This function is used to reset resource state owned by a vGPU.
+ *
+ */
+void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
+{
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+       intel_runtime_pm_get(dev_priv);
+       _clear_vgpu_fence(vgpu);
+       intel_runtime_pm_put(dev_priv);
+}
+
 /**
  * intel_alloc_vgpu_resource - allocate HW resource for a vGPU
  * @vgpu: vGPU
index 711c31c8d8b46c3c51e4f93a741daecf4b28ce31..4a6a2ed65732e1fde39457148165274deda52db6 100644 (file)
@@ -282,3 +282,77 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
        }
        return 0;
 }
+
+/**
+ * intel_vgpu_init_cfg_space - init vGPU configuration space when create vGPU
+ *
+ * @vgpu: a vGPU
+ * @primary: is the vGPU presented as primary
+ *
+ */
+void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
+                              bool primary)
+{
+       struct intel_gvt *gvt = vgpu->gvt;
+       const struct intel_gvt_device_info *info = &gvt->device_info;
+       u16 *gmch_ctl;
+       int i;
+
+       memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
+              info->cfg_space_size);
+
+       if (!primary) {
+               vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
+                       INTEL_GVT_PCI_CLASS_VGA_OTHER;
+               vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
+                       INTEL_GVT_PCI_CLASS_VGA_OTHER;
+       }
+
+       /* Show guest that there isn't any stolen memory.*/
+       gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
+       *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
+
+       intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
+                                gvt_aperture_pa_base(gvt), true);
+
+       vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
+                                            | PCI_COMMAND_MEMORY
+                                            | PCI_COMMAND_MASTER);
+       /*
+        * Clear the bar upper 32bit and let guest to assign the new value
+        */
+       memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
+       memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
+       memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
+
+       for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
+               vgpu->cfg_space.bar[i].size = pci_resource_len(
+                                             gvt->dev_priv->drm.pdev, i * 2);
+               vgpu->cfg_space.bar[i].tracked = false;
+       }
+}
+
+/**
+ * intel_vgpu_reset_cfg_space - reset vGPU configuration space
+ *
+ * @vgpu: a vGPU
+ *
+ */
+void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu)
+{
+       u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND];
+       bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] !=
+                               INTEL_GVT_PCI_CLASS_VGA_OTHER;
+
+       if (cmd & PCI_COMMAND_MEMORY) {
+               trap_gttmmio(vgpu, false);
+               map_aperture(vgpu, false);
+       }
+
+       /**
+        * Currently we only do such reset when vGPU is not
+        * owned by any VM, so we simply restore entire cfg
+        * space to default value.
+        */
+       intel_vgpu_init_cfg_space(vgpu, primary);
+}
index d26a092c70e8c8fe2a14df28dd22e4253f739a16..e4563984cb1e8106cda73e8d0e12f12d10f6a1d2 100644 (file)
@@ -481,7 +481,6 @@ struct parser_exec_state {
        (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
 
 static unsigned long bypass_scan_mask = 0;
-static bool bypass_batch_buffer_scan = true;
 
 /* ring ALL, type = 0 */
 static struct sub_op_bits sub_op_mi[] = {
@@ -1525,9 +1524,6 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
 {
        struct intel_gvt *gvt = s->vgpu->gvt;
 
-       if (bypass_batch_buffer_scan)
-               return 0;
-
        if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
                /* BDW decides privilege based on address space */
                if (cmd_val(s, 0) & (1 << 8))
index f32bb6f6495ce0aafddf35d920298c5d315af9bc..34083731669d8cbe55b94de2e5b3585aa73a7039 100644 (file)
@@ -364,58 +364,30 @@ static void free_workload(struct intel_vgpu_workload *workload)
 #define get_desc_from_elsp_dwords(ed, i) \
        ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
 
-
-#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
-#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
-static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
-                            unsigned long add, int gmadr_bytes)
-{
-       if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
-               return -1;
-
-       *((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
-               BATCH_BUFFER_ADDR_MASK;
-       if (gmadr_bytes == 8) {
-               *((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
-                       add & BATCH_BUFFER_ADDR_HIGH_MASK;
-       }
-
-       return 0;
-}
-
 static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 {
-       int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+       const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+       struct intel_shadow_bb_entry *entry_obj;
 
        /* pin the gem object to ggtt */
-       if (!list_empty(&workload->shadow_bb)) {
-               struct intel_shadow_bb_entry *entry_obj =
-                       list_first_entry(&workload->shadow_bb,
-                                        struct intel_shadow_bb_entry,
-                                        list);
-               struct intel_shadow_bb_entry *temp;
+       list_for_each_entry(entry_obj, &workload->shadow_bb, list) {
+               struct i915_vma *vma;
 
-               list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
-                               list) {
-                       struct i915_vma *vma;
-
-                       vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
-                                                      4, 0);
-                       if (IS_ERR(vma)) {
-                               gvt_err("Cannot pin\n");
-                               return;
-                       }
-
-                       /* FIXME: we are not tracking our pinned VMA leaving it
-                        * up to the core to fix up the stray pin_count upon
-                        * free.
-                        */
-
-                       /* update the relocate gma with shadow batch buffer*/
-                       set_gma_to_bb_cmd(entry_obj,
-                                         i915_ggtt_offset(vma),
-                                         gmadr_bytes);
+               vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
+               if (IS_ERR(vma)) {
+                       gvt_err("Cannot pin\n");
+                       return;
                }
+
+               /* FIXME: we are not tracking our pinned VMA leaving it
+                * up to the core to fix up the stray pin_count upon
+                * free.
+                */
+
+               /* update the relocate gma with shadow batch buffer*/
+               entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma);
+               if (gmadr_bytes == 8)
+                       entry_obj->bb_start_cmd_va[2] = 0;
        }
 }
 
@@ -826,7 +798,7 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
                INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
        }
 
-       vgpu->workloads = kmem_cache_create("gvt-g vgpu workload",
+       vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
                        sizeof(struct intel_vgpu_workload), 0,
                        SLAB_HWCACHE_ALIGN,
                        NULL);
index 6c5fdf5b2ce2a9d407839a3a28a7e067a5630d8d..47dec4acf7ff12951eb592e2b115953e961f6bdf 100644 (file)
@@ -240,15 +240,8 @@ static inline int get_pse_type(int type)
 static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
 {
        void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
-       u64 pte;
 
-#ifdef readq
-       pte = readq(addr);
-#else
-       pte = ioread32(addr);
-       pte |= (u64)ioread32(addr + 4) << 32;
-#endif
-       return pte;
+       return readq(addr);
 }
 
 static void write_pte64(struct drm_i915_private *dev_priv,
@@ -256,12 +249,8 @@ static void write_pte64(struct drm_i915_private *dev_priv,
 {
        void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
 
-#ifdef writeq
        writeq(pte, addr);
-#else
-       iowrite32((u32)pte, addr);
-       iowrite32(pte >> 32, addr + 4);
-#endif
+
        I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
        POSTING_READ(GFX_FLSH_CNTL_GEN6);
 }
@@ -1380,8 +1369,7 @@ static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
                        info->gtt_entry_size;
                mem = kzalloc(mm->has_shadow_page_table ?
                        mm->page_table_entry_size * 2
-                               : mm->page_table_entry_size,
-                       GFP_ATOMIC);
+                               : mm->page_table_entry_size, GFP_KERNEL);
                if (!mem)
                        return -ENOMEM;
                mm->virtual_page_table = mem;
@@ -1532,7 +1520,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
        struct intel_vgpu_mm *mm;
        int ret;
 
-       mm = kzalloc(sizeof(*mm), GFP_ATOMIC);
+       mm = kzalloc(sizeof(*mm), GFP_KERNEL);
        if (!mm) {
                ret = -ENOMEM;
                goto fail;
@@ -1886,30 +1874,27 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        int page_entry_num = GTT_PAGE_SIZE >>
                                vgpu->gvt->device_info.gtt_entry_size_shift;
-       struct page *scratch_pt;
+       void *scratch_pt;
        unsigned long mfn;
        int i;
-       void *p;
 
        if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
                return -EINVAL;
 
-       scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
+       scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
        if (!scratch_pt) {
                gvt_err("fail to allocate scratch page\n");
                return -ENOMEM;
        }
 
-       p = kmap_atomic(scratch_pt);
-       mfn = intel_gvt_hypervisor_virt_to_mfn(p);
+       mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt);
        if (mfn == INTEL_GVT_INVALID_ADDR) {
-               gvt_err("fail to translate vaddr:0x%llx\n", (u64)p);
-               kunmap_atomic(p);
-               __free_page(scratch_pt);
+               gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt);
+               free_page((unsigned long)scratch_pt);
                return -EFAULT;
        }
        gtt->scratch_pt[type].page_mfn = mfn;
-       gtt->scratch_pt[type].page = scratch_pt;
+       gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
        gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
                        vgpu->id, type, mfn);
 
@@ -1918,7 +1903,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
         * scratch_pt[type] indicate the scratch pt/scratch page used by the
         * 'type' pt.
         * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by
-        * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scatch_pt it self
+        * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt itself
         * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn.
         */
        if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
@@ -1936,11 +1921,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
                        se.val64 |= PPAT_CACHED_INDEX;
 
                for (i = 0; i < page_entry_num; i++)
-                       ops->set_entry(p, &se, i, false, 0, vgpu);
+                       ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
        }
 
-       kunmap_atomic(p);
-
        return 0;
 }
 
@@ -2208,7 +2191,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
 int intel_gvt_init_gtt(struct intel_gvt *gvt)
 {
        int ret;
-       void *page_addr;
+       void *page;
 
        gvt_dbg_core("init gtt\n");
 
@@ -2221,17 +2204,14 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
                return -ENODEV;
        }
 
-       gvt->gtt.scratch_ggtt_page =
-               alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
-       if (!gvt->gtt.scratch_ggtt_page) {
+       page = (void *)get_zeroed_page(GFP_KERNEL);
+       if (!page) {
                gvt_err("fail to allocate scratch ggtt page\n");
                return -ENOMEM;
        }
+       gvt->gtt.scratch_ggtt_page = virt_to_page(page);
 
-       page_addr = page_address(gvt->gtt.scratch_ggtt_page);
-
-       gvt->gtt.scratch_ggtt_mfn =
-               intel_gvt_hypervisor_virt_to_mfn(page_addr);
+       gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page);
        if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
                gvt_err("fail to translate scratch ggtt page\n");
                __free_page(gvt->gtt.scratch_ggtt_page);
@@ -2297,3 +2277,30 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
        for (offset = 0; offset < num_entries; offset++)
                ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
 }
+
+/**
+ * intel_vgpu_reset_gtt - reset the all GTT related status
+ * @vgpu: a vGPU
+ * @dmlr: true for vGPU Device Model Level Reset, false for GT Reset
+ *
+ * This function is called from vfio core to reset reset all
+ * GTT related status, including GGTT, PPGTT, scratch page.
+ *
+ */
+void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
+{
+       int i;
+
+       ppgtt_free_all_shadow_page(vgpu);
+       if (!dmlr)
+               return;
+
+       intel_vgpu_reset_ggtt(vgpu);
+
+       /* clear scratch page for security */
+       for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
+               if (vgpu->gtt.scratch_pt[i].page != NULL)
+                       memset(page_address(vgpu->gtt.scratch_pt[i].page),
+                               0, PAGE_SIZE);
+       }
+}
index b315ab3593ec37f2e73faf564a6d6c9fee9e7c81..f88eb5e89bea09f7b6e8aba2e521748d54d28b77 100644 (file)
@@ -208,6 +208,7 @@ extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
 
 extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
+extern void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr);
 extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
 
 extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
index 398877c3d2fd98a19ba8f45712ef444a4fcb0896..e6bf5c533fbe5c795a7cef6baef2815aea797ce7 100644 (file)
@@ -201,6 +201,8 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
        intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
        intel_gvt_clean_vgpu_types(gvt);
 
+       idr_destroy(&gvt->vgpu_idr);
+
        kfree(dev_priv->gvt);
        dev_priv->gvt = NULL;
 }
@@ -237,6 +239,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 
        gvt_dbg_core("init gvt device\n");
 
+       idr_init(&gvt->vgpu_idr);
+
        mutex_init(&gvt->lock);
        gvt->dev_priv = dev_priv;
 
@@ -244,7 +248,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 
        ret = intel_gvt_setup_mmio_info(gvt);
        if (ret)
-               return ret;
+               goto out_clean_idr;
 
        ret = intel_gvt_load_firmware(gvt);
        if (ret)
@@ -313,6 +317,8 @@ out_free_firmware:
        intel_gvt_free_firmware(gvt);
 out_clean_mmio_info:
        intel_gvt_clean_mmio_info(gvt);
+out_clean_idr:
+       idr_destroy(&gvt->vgpu_idr);
        kfree(gvt);
        return ret;
 }
index 0af17016f33f24f40d338715e5c78bfac8058e92..e227caf5859ebdfd2c420bc994d42a5734ba4272 100644 (file)
@@ -323,6 +323,7 @@ struct intel_vgpu_creation_params {
 
 int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
                              struct intel_vgpu_creation_params *param);
+void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
 void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
 void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
        u32 fence, u64 value);
@@ -375,6 +376,8 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);
 struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
                                         struct intel_vgpu_type *type);
 void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
+                                unsigned int engine_mask);
 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
 
 
@@ -411,6 +414,10 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
 int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
                             unsigned long *g_index);
 
+void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
+               bool primary);
+void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);
+
 int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes);
 
@@ -424,7 +431,6 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
 int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
 
 int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
-int setup_vgpu_mmio(struct intel_vgpu *vgpu);
 void populate_pvinfo_page(struct intel_vgpu *vgpu);
 
 struct intel_gvt_ops {
index 522809710312c25767209656133651971dd6a01f..ab2ea157da4cd6ab0a39fbba8bae196806b843c6 100644 (file)
@@ -93,7 +93,8 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
 static int new_mmio_info(struct intel_gvt *gvt,
                u32 offset, u32 flags, u32 size,
                u32 addr_mask, u32 ro_mask, u32 device,
-               void *read, void *write)
+               int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int),
+               int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int))
 {
        struct intel_gvt_mmio_info *info, *p;
        u32 start, end, i;
@@ -219,7 +220,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
                default:
                        /*should not hit here*/
                        gvt_err("invalid forcewake offset 0x%x\n", offset);
-                       return 1;
+                       return -EINVAL;
                }
        } else {
                ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
@@ -230,77 +231,45 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
        return 0;
 }
 
-static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
-               void *p_data, unsigned int bytes, unsigned long bitmap)
-{
-       struct intel_gvt_workload_scheduler *scheduler =
-               &vgpu->gvt->scheduler;
-
-       vgpu->resetting = true;
-
-       intel_vgpu_stop_schedule(vgpu);
-       /*
-        * The current_vgpu will set to NULL after stopping the
-        * scheduler when the reset is triggered by current vgpu.
-        */
-       if (scheduler->current_vgpu == NULL) {
-               mutex_unlock(&vgpu->gvt->lock);
-               intel_gvt_wait_vgpu_idle(vgpu);
-               mutex_lock(&vgpu->gvt->lock);
-       }
-
-       intel_vgpu_reset_execlist(vgpu, bitmap);
-
-       /* full GPU reset */
-       if (bitmap == 0xff) {
-               mutex_unlock(&vgpu->gvt->lock);
-               intel_vgpu_clean_gtt(vgpu);
-               mutex_lock(&vgpu->gvt->lock);
-               setup_vgpu_mmio(vgpu);
-               populate_pvinfo_page(vgpu);
-               intel_vgpu_init_gtt(vgpu);
-       }
-
-       vgpu->resetting = false;
-
-       return 0;
-}
-
 static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
-               void *p_data, unsigned int bytes)
+                           void *p_data, unsigned int bytes)
 {
+       unsigned int engine_mask = 0;
        u32 data;
-       u64 bitmap = 0;
 
        write_vreg(vgpu, offset, p_data, bytes);
        data = vgpu_vreg(vgpu, offset);
 
        if (data & GEN6_GRDOM_FULL) {
                gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
-               bitmap = 0xff;
-       }
-       if (data & GEN6_GRDOM_RENDER) {
-               gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
-               bitmap |= (1 << RCS);
-       }
-       if (data & GEN6_GRDOM_MEDIA) {
-               gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
-               bitmap |= (1 << VCS);
-       }
-       if (data & GEN6_GRDOM_BLT) {
-               gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
-               bitmap |= (1 << BCS);
-       }
-       if (data & GEN6_GRDOM_VECS) {
-               gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
-               bitmap |= (1 << VECS);
-       }
-       if (data & GEN8_GRDOM_MEDIA2) {
-               gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
-               if (HAS_BSD2(vgpu->gvt->dev_priv))
-                       bitmap |= (1 << VCS2);
+               engine_mask = ALL_ENGINES;
+       } else {
+               if (data & GEN6_GRDOM_RENDER) {
+                       gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
+                       engine_mask |= (1 << RCS);
+               }
+               if (data & GEN6_GRDOM_MEDIA) {
+                       gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
+                       engine_mask |= (1 << VCS);
+               }
+               if (data & GEN6_GRDOM_BLT) {
+                       gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
+                       engine_mask |= (1 << BCS);
+               }
+               if (data & GEN6_GRDOM_VECS) {
+                       gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
+                       engine_mask |= (1 << VECS);
+               }
+               if (data & GEN8_GRDOM_MEDIA2) {
+                       gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
+                       if (HAS_BSD2(vgpu->gvt->dev_priv))
+                               engine_mask |= (1 << VCS2);
+               }
        }
-       return handle_device_reset(vgpu, offset, p_data, bytes, bitmap);
+
+       intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
+
+       return 0;
 }
 
 static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
@@ -974,7 +943,7 @@ static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
        return 0;
 }
 
-static bool sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes)
 {
        u32 data;
@@ -1366,7 +1335,6 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
                unsigned int offset, void *p_data, unsigned int bytes)
 {
-       int rc = 0;
        unsigned int id = 0;
 
        write_vreg(vgpu, offset, p_data, bytes);
@@ -1389,12 +1357,11 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
                id = VECS;
                break;
        default:
-               rc = -EINVAL;
-               break;
+               return -EINVAL;
        }
        set_bit(id, (void *)vgpu->tlb_handle_pending);
 
-       return rc;
+       return 0;
 }
 
 static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
index faaae07ae487277973533bbf907b6eefc2632a48..3f656e3a6e5a79a598934381d7f3a59e09eb8cd7 100644 (file)
@@ -230,8 +230,8 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
        return NULL;
 }
 
-static ssize_t available_instance_show(struct kobject *kobj, struct device *dev,
-               char *buf)
+static ssize_t available_instances_show(struct kobject *kobj,
+                                       struct device *dev, char *buf)
 {
        struct intel_vgpu_type *type;
        unsigned int num = 0;
@@ -269,12 +269,12 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev,
                                type->fence);
 }
 
-static MDEV_TYPE_ATTR_RO(available_instance);
+static MDEV_TYPE_ATTR_RO(available_instances);
 static MDEV_TYPE_ATTR_RO(device_api);
 static MDEV_TYPE_ATTR_RO(description);
 
 static struct attribute *type_attrs[] = {
-       &mdev_type_attr_available_instance.attr,
+       &mdev_type_attr_available_instances.attr,
        &mdev_type_attr_device_api.attr,
        &mdev_type_attr_description.attr,
        NULL,
@@ -398,6 +398,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
        struct intel_vgpu_type *type;
        struct device *pdev;
        void *gvt;
+       int ret;
 
        pdev = mdev_parent_dev(mdev);
        gvt = kdev_to_i915(pdev)->gvt;
@@ -406,13 +407,15 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
        if (!type) {
                gvt_err("failed to find type %s to create\n",
                                                kobject_name(kobj));
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
 
        vgpu = intel_gvt_ops->vgpu_create(gvt, type);
        if (IS_ERR_OR_NULL(vgpu)) {
-               gvt_err("create intel vgpu failed\n");
-               return -EINVAL;
+               ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
+               gvt_err("failed to create intel vgpu: %d\n", ret);
+               goto out;
        }
 
        INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
@@ -422,7 +425,10 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
 
        gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
                     dev_name(mdev_dev(mdev)));
-       return 0;
+       ret = 0;
+
+out:
+       return ret;
 }
 
 static int intel_vgpu_remove(struct mdev_device *mdev)
index 09c9450a19462e940eb4df2e71af1e10974ef878..4df078bc5d042b1f4fc411fbb0f98c83a3cba729 100644 (file)
@@ -125,25 +125,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
        if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
                goto err;
 
-       mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
-       if (!mmio && !vgpu->mmio.disable_warn_untrack) {
-               gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n",
-                               vgpu->id, offset, bytes, *(u32 *)p_data);
-
-               if (offset == 0x206c) {
-                       gvt_err("------------------------------------------\n");
-                       gvt_err("vgpu%d: likely triggers a gfx reset\n",
-                       vgpu->id);
-                       gvt_err("------------------------------------------\n");
-                       vgpu->mmio.disable_warn_untrack = true;
-               }
-       }
-
        if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
                if (WARN_ON(!IS_ALIGNED(offset, bytes)))
                        goto err;
        }
 
+       mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
        if (mmio) {
                if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
                        if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
@@ -152,9 +139,23 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
                                goto err;
                }
                ret = mmio->read(vgpu, offset, p_data, bytes);
-       } else
+       } else {
                ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
 
+               if (!vgpu->mmio.disable_warn_untrack) {
+                       gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n",
+                               vgpu->id, offset, bytes, *(u32 *)p_data);
+
+                       if (offset == 0x206c) {
+                               gvt_err("------------------------------------------\n");
+                               gvt_err("vgpu%d: likely triggers a gfx reset\n",
+                                       vgpu->id);
+                               gvt_err("------------------------------------------\n");
+                               vgpu->mmio.disable_warn_untrack = true;
+                       }
+               }
+       }
+
        if (ret)
                goto err;
 
@@ -302,3 +303,56 @@ err:
        mutex_unlock(&gvt->lock);
        return ret;
 }
+
+
+/**
+ * intel_vgpu_reset_mmio - reset virtual MMIO space
+ * @vgpu: a vGPU
+ *
+ */
+void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
+{
+       struct intel_gvt *gvt = vgpu->gvt;
+       const struct intel_gvt_device_info *info = &gvt->device_info;
+
+       memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
+       memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
+
+       vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
+
+       /* set the bit 0:2(Core C-State ) to C0 */
+       vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
+}
+
+/**
+ * intel_vgpu_init_mmio - init MMIO  space
+ * @vgpu: a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed
+ */
+int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
+{
+       const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+
+       vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
+       if (!vgpu->mmio.vreg)
+               return -ENOMEM;
+
+       vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
+
+       intel_vgpu_reset_mmio(vgpu);
+
+       return 0;
+}
+
+/**
+ * intel_vgpu_clean_mmio - clean MMIO space
+ * @vgpu: a vGPU
+ *
+ */
+void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
+{
+       vfree(vgpu->mmio.vreg);
+       vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
+}
index 87d5b5e366a3c97e7b2da79c1b8e66d97823e916..3bc620f56f351e774dc8658c9f06c79d0b24446b 100644 (file)
@@ -86,6 +86,10 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
        *offset; \
 })
 
+int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu);
+void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);
+
 int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);
 
 int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
index 81cd921770c6db7748ad8e880180c3fb6f4c448a..d9fb41ab71198cb19b1ade4796f687af49444c80 100644 (file)
@@ -36,9 +36,9 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
                        vgpu->id))
                return -EINVAL;
 
-       vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC |
-                       GFP_DMA32 | __GFP_ZERO,
-                       INTEL_GVT_OPREGION_PORDER);
+       vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
+                       __GFP_ZERO,
+                       get_order(INTEL_GVT_OPREGION_SIZE));
 
        if (!vgpu_opregion(vgpu)->va)
                return -ENOMEM;
@@ -97,7 +97,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
        if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
                map_vgpu_opregion(vgpu, false);
                free_pages((unsigned long)vgpu_opregion(vgpu)->va,
-                               INTEL_GVT_OPREGION_PORDER);
+                               get_order(INTEL_GVT_OPREGION_SIZE));
 
                vgpu_opregion(vgpu)->va = NULL;
        }
index 0dfe789d8f02b64ade88381b2a69fbca09f2862a..fbd023a16f18163d6dcb52bcf795675e3c16a4f7 100644 (file)
@@ -50,8 +50,7 @@
 #define INTEL_GVT_OPREGION_PARM                   0x204
 
 #define INTEL_GVT_OPREGION_PAGES       2
-#define INTEL_GVT_OPREGION_PORDER      1
-#define INTEL_GVT_OPREGION_SIZE                (2 * 4096)
+#define INTEL_GVT_OPREGION_SIZE                (INTEL_GVT_OPREGION_PAGES * PAGE_SIZE)
 
 #define VGT_SPRSTRIDE(pipe)    _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)
 
index 4db24225023520b879b12fce1a924cc236384bd5..e91885dffeff8d76d73f1942a9e3c91b5087639c 100644 (file)
@@ -350,13 +350,15 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 {
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload;
+       struct intel_vgpu *vgpu;
        int event;
 
        mutex_lock(&gvt->lock);
 
        workload = scheduler->current_workload[ring_id];
+       vgpu = workload->vgpu;
 
-       if (!workload->status && !workload->vgpu->resetting) {
+       if (!workload->status && !vgpu->resetting) {
                wait_event(workload->shadow_ctx_status_wq,
                           !atomic_read(&workload->shadow_ctx_active));
 
@@ -364,8 +366,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
                for_each_set_bit(event, workload->pending_events,
                                 INTEL_GVT_EVENT_MAX)
-                       intel_vgpu_trigger_virtual_event(workload->vgpu,
-                                       event);
+                       intel_vgpu_trigger_virtual_event(vgpu, event);
        }
 
        gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -373,11 +374,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
        scheduler->current_workload[ring_id] = NULL;
 
-       atomic_dec(&workload->vgpu->running_workload_num);
-
        list_del_init(&workload->list);
        workload->complete(workload);
 
+       atomic_dec(&vgpu->running_workload_num);
        wake_up(&scheduler->workload_complete_wq);
        mutex_unlock(&gvt->lock);
 }
@@ -459,11 +459,11 @@ complete:
                gvt_dbg_sched("will complete workload %p\n, status: %d\n",
                                workload, workload->status);
 
-               complete_current_workload(gvt, ring_id);
-
                if (workload->req)
                        i915_gem_request_put(fetch_and_zero(&workload->req));
 
+               complete_current_workload(gvt, ring_id);
+
                if (need_force_wake)
                        intel_uncore_forcewake_put(gvt->dev_priv,
                                        FORCEWAKE_ALL);
index 3b30c28bff515f0fd1ce7e385ea89d6f17ccc834..2833dfa8c9aed8e9b6c8f86bbd2ad9a45c461b0c 100644 (file)
@@ -113,7 +113,7 @@ struct intel_shadow_bb_entry {
        struct drm_i915_gem_object *obj;
        void *va;
        unsigned long len;
-       void *bb_start_cmd_va;
+       u32 *bb_start_cmd_va;
 };
 
 #define workload_q_head(vgpu, ring_id) \
index 536d2b9d577732f57a1775f54bdb4a7bce8a7e39..7295bc8e12fb240eeaf6f9434d2bba713453ad3c 100644 (file)
 #include "gvt.h"
 #include "i915_pvinfo.h"
 
-static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
-{
-       vfree(vgpu->mmio.vreg);
-       vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
-}
-
-int setup_vgpu_mmio(struct intel_vgpu *vgpu)
-{
-       struct intel_gvt *gvt = vgpu->gvt;
-       const struct intel_gvt_device_info *info = &gvt->device_info;
-
-       if (vgpu->mmio.vreg)
-               memset(vgpu->mmio.vreg, 0, info->mmio_size * 2);
-       else {
-               vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
-               if (!vgpu->mmio.vreg)
-                       return -ENOMEM;
-       }
-
-       vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
-
-       memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
-       memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
-
-       vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
-
-       /* set the bit 0:2(Core C-State ) to C0 */
-       vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
-       return 0;
-}
-
-static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
-       struct intel_vgpu_creation_params *param)
-{
-       struct intel_gvt *gvt = vgpu->gvt;
-       const struct intel_gvt_device_info *info = &gvt->device_info;
-       u16 *gmch_ctl;
-       int i;
-
-       memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
-              info->cfg_space_size);
-
-       if (!param->primary) {
-               vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
-                       INTEL_GVT_PCI_CLASS_VGA_OTHER;
-               vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
-                       INTEL_GVT_PCI_CLASS_VGA_OTHER;
-       }
-
-       /* Show guest that there isn't any stolen memory.*/
-       gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
-       *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
-
-       intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
-                                gvt_aperture_pa_base(gvt), true);
-
-       vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
-                                            | PCI_COMMAND_MEMORY
-                                            | PCI_COMMAND_MASTER);
-       /*
-        * Clear the bar upper 32bit and let guest to assign the new value
-        */
-       memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
-       memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
-       memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
-
-       for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
-               vgpu->cfg_space.bar[i].size = pci_resource_len(
-                                             gvt->dev_priv->drm.pdev, i * 2);
-               vgpu->cfg_space.bar[i].tracked = false;
-       }
-}
-
 void populate_pvinfo_page(struct intel_vgpu *vgpu)
 {
        /* setup the ballooning information */
@@ -177,7 +104,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
                if (low_avail / min_low == 0)
                        break;
                gvt->types[i].low_gm_size = min_low;
-               gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size;
+               gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U));
                gvt->types[i].fence = 4;
                gvt->types[i].max_instance = low_avail / min_low;
                gvt->types[i].avail_instance = gvt->types[i].max_instance;
@@ -217,7 +144,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
         */
        low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE -
                gvt->gm.vgpu_allocated_low_gm_size;
-       high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE -
+       high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE -
                gvt->gm.vgpu_allocated_high_gm_size;
        fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
                gvt->fence.vgpu_allocated_fence_num;
@@ -268,7 +195,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
        intel_vgpu_clean_gtt(vgpu);
        intel_gvt_hypervisor_detach_vgpu(vgpu);
        intel_vgpu_free_resource(vgpu);
-       clean_vgpu_mmio(vgpu);
+       intel_vgpu_clean_mmio(vgpu);
        vfree(vgpu);
 
        intel_gvt_update_vgpu_types(gvt);
@@ -300,11 +227,11 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
        vgpu->gvt = gvt;
        bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
 
-       setup_vgpu_cfg_space(vgpu, param);
+       intel_vgpu_init_cfg_space(vgpu, param->primary);
 
-       ret = setup_vgpu_mmio(vgpu);
+       ret = intel_vgpu_init_mmio(vgpu);
        if (ret)
-               goto out_free_vgpu;
+               goto out_clean_idr;
 
        ret = intel_vgpu_alloc_resource(vgpu, param);
        if (ret)
@@ -354,7 +281,9 @@ out_detach_hypervisor_vgpu:
 out_clean_vgpu_resource:
        intel_vgpu_free_resource(vgpu);
 out_clean_vgpu_mmio:
-       clean_vgpu_mmio(vgpu);
+       intel_vgpu_clean_mmio(vgpu);
+out_clean_idr:
+       idr_remove(&gvt->vgpu_idr, vgpu->id);
 out_free_vgpu:
        vfree(vgpu);
        mutex_unlock(&gvt->lock);
@@ -398,7 +327,75 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
 }
 
 /**
- * intel_gvt_reset_vgpu - reset a virtual GPU
+ * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
+ * @vgpu: virtual GPU
+ * @dmlr: vGPU Device Model Level Reset or GT Reset
+ * @engine_mask: engines to reset for GT reset
+ *
+ * This function is called when user wants to reset a virtual GPU through
+ * device model reset or GT reset. The caller should hold the gvt lock.
+ *
+ * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset
+ * the whole vGPU to default state as when it is created. This vGPU function
+ * is required both for functionary and security concerns.The ultimate goal
+ * of vGPU FLR is that reuse a vGPU instance by virtual machines. When we
+ * assign a vGPU to a virtual machine we must isse such reset first.
+ *
+ * Full GT Reset and Per-Engine GT Reset are soft reset flow for GPU engines
+ * (Render, Blitter, Video, Video Enhancement). It is defined by GPU Spec.
+ * Unlike the FLR, GT reset only reset particular resource of a vGPU per
+ * the reset request. Guest driver can issue a GT reset by programming the
+ * virtual GDRST register to reset specific virtual GPU engine or all
+ * engines.
+ *
+ * The parameter dev_level is to identify if we will do DMLR or GT reset.
+ * The parameter engine_mask is to specific the engines that need to be
+ * resetted. If value ALL_ENGINES is given for engine_mask, it means
+ * the caller requests a full GT reset that we will reset all virtual
+ * GPU engines. For FLR, engine_mask is ignored.
+ */
+void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
+                                unsigned int engine_mask)
+{
+       struct intel_gvt *gvt = vgpu->gvt;
+       struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+
+       gvt_dbg_core("------------------------------------------\n");
+       gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
+                    vgpu->id, dmlr, engine_mask);
+       vgpu->resetting = true;
+
+       intel_vgpu_stop_schedule(vgpu);
+       /*
+        * The current_vgpu will set to NULL after stopping the
+        * scheduler when the reset is triggered by current vgpu.
+        */
+       if (scheduler->current_vgpu == NULL) {
+               mutex_unlock(&gvt->lock);
+               intel_gvt_wait_vgpu_idle(vgpu);
+               mutex_lock(&gvt->lock);
+       }
+
+       intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);
+
+       /* full GPU reset or device model level reset */
+       if (engine_mask == ALL_ENGINES || dmlr) {
+               intel_vgpu_reset_gtt(vgpu, dmlr);
+               intel_vgpu_reset_resource(vgpu);
+               intel_vgpu_reset_mmio(vgpu);
+               populate_pvinfo_page(vgpu);
+
+               if (dmlr)
+                       intel_vgpu_reset_cfg_space(vgpu);
+       }
+
+       vgpu->resetting = false;
+       gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
+       gvt_dbg_core("------------------------------------------\n");
+}
+
+/**
+ * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level)
  * @vgpu: virtual GPU
  *
  * This function is called when user wants to reset a virtual GPU.
@@ -406,4 +403,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
  */
 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
 {
+       mutex_lock(&vgpu->gvt->lock);
+       intel_gvt_reset_vgpu_locked(vgpu, true, 0);
+       mutex_unlock(&vgpu->gvt->lock);
 }
index 445fec9c2841ad61282e538d4cfdbe1436e04fb3..728ca3ea74d2c85df8734ddaa9285e126bc2c82c 100644 (file)
@@ -213,7 +213,8 @@ static void intel_detect_pch(struct drm_device *dev)
                        } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_KBP;
                                DRM_DEBUG_KMS("Found KabyPoint PCH\n");
-                               WARN_ON(!IS_KABYLAKE(dev_priv));
+                               WARN_ON(!IS_SKYLAKE(dev_priv) &&
+                                       !IS_KABYLAKE(dev_priv));
                        } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
                                   (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
                                   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
@@ -2378,7 +2379,7 @@ static int intel_runtime_suspend(struct device *kdev)
 
        assert_forcewakes_inactive(dev_priv);
 
-       if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv))
+       if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
                intel_hpd_poll_init(dev_priv);
 
        DRM_DEBUG_KMS("Device suspended\n");
@@ -2427,6 +2428,7 @@ static int intel_runtime_resume(struct device *kdev)
         * we can do is to hope that things will still work (and disable RPM).
         */
        i915_gem_init_swizzling(dev_priv);
+       i915_gem_restore_fences(dev_priv);
 
        intel_runtime_pm_enable_interrupts(dev_priv);
 
index 243224aeabf82f111ab14c4d6995ce868c0d36c1..8493e19b563a134ba588e9df39c3fb5785a25a69 100644 (file)
@@ -1012,6 +1012,8 @@ struct intel_fbc {
        struct work_struct underrun_work;
 
        struct intel_fbc_state_cache {
+               struct i915_vma *vma;
+
                struct {
                        unsigned int mode_flags;
                        uint32_t hsw_bdw_pixel_rate;
@@ -1025,15 +1027,14 @@ struct intel_fbc {
                } plane;
 
                struct {
-                       u64 ilk_ggtt_offset;
                        uint32_t pixel_format;
                        unsigned int stride;
-                       int fence_reg;
-                       unsigned int tiling_mode;
                } fb;
        } state_cache;
 
        struct intel_fbc_reg_params {
+               struct i915_vma *vma;
+
                struct {
                        enum pipe pipe;
                        enum plane plane;
@@ -1041,10 +1042,8 @@ struct intel_fbc {
                } crtc;
 
                struct {
-                       u64 ggtt_offset;
                        uint32_t pixel_format;
                        unsigned int stride;
-                       int fence_reg;
                } fb;
 
                int cfb_size;
@@ -1977,6 +1976,11 @@ struct drm_i915_private {
 
        struct i915_frontbuffer_tracking fb_tracking;
 
+       struct intel_atomic_helper {
+               struct llist_head free_list;
+               struct work_struct free_work;
+       } atomic_helper;
+
        u16 orig_clock;
 
        bool mchbar_need_disable;
@@ -3163,13 +3167,6 @@ i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj,
        return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view);
 }
 
-static inline unsigned long
-i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
-                           const struct i915_ggtt_view *view)
-{
-       return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view));
-}
-
 /* i915_gem_fence_reg.c */
 int __must_check i915_vma_get_fence(struct i915_vma *vma);
 int __must_check i915_vma_put_fence(struct i915_vma *vma);
index 3dd7fc662859a90803b142a8adf7f542f29c5948..24b5b046754b37e8b8e3ab2c04a9f890b41eecf9 100644 (file)
@@ -595,47 +595,21 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pwrite *args,
                     struct drm_file *file)
 {
-       struct drm_device *dev = obj->base.dev;
        void *vaddr = obj->phys_handle->vaddr + args->offset;
        char __user *user_data = u64_to_user_ptr(args->data_ptr);
-       int ret;
 
        /* We manually control the domain here and pretend that it
         * remains coherent i.e. in the GTT domain, like shmem_pwrite.
         */
-       lockdep_assert_held(&obj->base.dev->struct_mutex);
-       ret = i915_gem_object_wait(obj,
-                                  I915_WAIT_INTERRUPTIBLE |
-                                  I915_WAIT_LOCKED |
-                                  I915_WAIT_ALL,
-                                  MAX_SCHEDULE_TIMEOUT,
-                                  to_rps_client(file));
-       if (ret)
-               return ret;
-
        intel_fb_obj_invalidate(obj, ORIGIN_CPU);
-       if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
-               unsigned long unwritten;
-
-               /* The physical object once assigned is fixed for the lifetime
-                * of the obj, so we can safely drop the lock and continue
-                * to access vaddr.
-                */
-               mutex_unlock(&dev->struct_mutex);
-               unwritten = copy_from_user(vaddr, user_data, args->size);
-               mutex_lock(&dev->struct_mutex);
-               if (unwritten) {
-                       ret = -EFAULT;
-                       goto out;
-               }
-       }
+       if (copy_from_user(vaddr, user_data, args->size))
+               return -EFAULT;
 
        drm_clflush_virt_range(vaddr, args->size);
-       i915_gem_chipset_flush(to_i915(dev));
+       i915_gem_chipset_flush(to_i915(obj->base.dev));
 
-out:
        intel_fb_obj_flush(obj, false, ORIGIN_CPU);
-       return ret;
+       return 0;
 }
 
 void *i915_gem_object_alloc(struct drm_device *dev)
@@ -2036,8 +2010,16 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
                struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
 
-               if (WARN_ON(reg->pin_count))
-                       continue;
+               /* Ideally we want to assert that the fence register is not
+                * live at this point (i.e. that no piece of code will be
+                * trying to write through fence + GTT, as that both violates
+                * our tracking of activity and associated locking/barriers,
+                * but also is illegal given that the hw is powered down).
+                *
+                * Previously we used reg->pin_count as a "liveness" indicator.
+                * That is not sufficient, and we need a more fine-grained
+                * tool if we want to have a sanity check here.
+                */
 
                if (!reg->vma)
                        continue;
@@ -3504,7 +3486,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
        vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
 
        /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
-       if (obj->cache_dirty) {
+       if (obj->cache_dirty || obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
                i915_gem_clflush_object(obj, true);
                intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
        }
index bd08814b015cb238c639e2becec6917901890d4e..d534a316a16ee412420c7e5cc01fb37c03755480 100644 (file)
@@ -199,6 +199,7 @@ found:
        }
 
        /* Unbinding will emit any required flushes */
+       ret = 0;
        while (!list_empty(&eviction_list)) {
                vma = list_first_entry(&eviction_list,
                                       struct i915_vma,
index 097d9d8c2315e061a3600615532c6d51531ee42c..b8b877c91b0a9b36b1a9ac318b2e5255b096a46e 100644 (file)
@@ -1181,14 +1181,14 @@ validate_exec_list(struct drm_device *dev,
                        if (exec[i].offset !=
                            gen8_canonical_addr(exec[i].offset & PAGE_MASK))
                                return -EINVAL;
-
-                       /* From drm_mm perspective address space is continuous,
-                        * so from this point we're always using non-canonical
-                        * form internally.
-                        */
-                       exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
                }
 
+               /* From drm_mm perspective address space is continuous,
+                * so from this point we're always using non-canonical
+                * form internally.
+                */
+               exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
+
                if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
                        return -EINVAL;
 
index 4b3ff3e5b911167557880228d5da44bb3d2616bf..d09c74973cb37a6c8d599c007ea4a53c0e0994e0 100644 (file)
@@ -66,8 +66,16 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 
        max_order = MAX_ORDER;
 #ifdef CONFIG_SWIOTLB
-       if (swiotlb_nr_tbl()) /* minimum max swiotlb size is IO_TLB_SEGSIZE */
-               max_order = min(max_order, ilog2(IO_TLB_SEGPAGES));
+       if (swiotlb_nr_tbl()) {
+               unsigned int max_segment;
+
+               max_segment = swiotlb_max_segment();
+               if (max_segment) {
+                       max_segment = max_t(unsigned int, max_segment,
+                                           PAGE_SIZE) >> PAGE_SHIFT;
+                       max_order = min(max_order, ilog2(max_segment));
+               }
+       }
 #endif
 
        gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
index 6a368de9d81e7be2d6e775b3cca7c7187e6a4c88..ecfefb9d42e4a85ee94946d6d1406f5084612874 100644 (file)
@@ -256,7 +256,7 @@ extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
 static inline bool
 i915_gem_object_is_dead(const struct drm_i915_gem_object *obj)
 {
-       return atomic_read(&obj->base.refcount.refcount) == 0;
+       return kref_read(&obj->base.refcount) == 0;
 }
 
 static inline bool
index a792dcb902b51d337f46f2c1ad7ad1ea737f4c8a..e924a95160796d1c8aa708e1c9ac0312b1015eb8 100644 (file)
@@ -185,6 +185,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
                        return ret;
        }
 
+       trace_i915_vma_bind(vma, bind_flags);
        ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
        if (ret)
                return ret;
index dbe9fb41ae535449f996ab36e51bd626be6651fc..8d3e515f27bade27acda9544a30d975628ac8ab3 100644 (file)
@@ -85,6 +85,8 @@ intel_plane_duplicate_state(struct drm_plane *plane)
 
        __drm_atomic_helper_plane_duplicate_state(plane, state);
 
+       intel_state->vma = NULL;
+
        return state;
 }
 
@@ -100,6 +102,24 @@ void
 intel_plane_destroy_state(struct drm_plane *plane,
                          struct drm_plane_state *state)
 {
+       struct i915_vma *vma;
+
+       vma = fetch_and_zero(&to_intel_plane_state(state)->vma);
+
+       /*
+        * FIXME: Normally intel_cleanup_plane_fb handles destruction of vma.
+        * We currently don't clear all planes during driver unload, so we have
+        * to be able to unpin vma here for now.
+        *
+        * Normally this can only happen during unload when kmscon is disabled
+        * and userspace doesn't attempt to set a framebuffer at all.
+        */
+       if (vma) {
+               mutex_lock(&plane->dev->struct_mutex);
+               intel_unpin_fb_vma(vma);
+               mutex_unlock(&plane->dev->struct_mutex);
+       }
+
        drm_atomic_helper_plane_destroy_state(plane, state);
 }
 
index 86ecec5601d42dd59f937f7c8c2203b3aeb105bc..588470eb8d395df2719fe2e172f93f414ba88e92 100644 (file)
@@ -499,6 +499,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
        struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
        struct edid *edid;
        struct i2c_adapter *i2c;
+       bool ret = false;
 
        BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
 
@@ -515,17 +516,17 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
                 */
                if (!is_digital) {
                        DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
-                       return true;
+                       ret = true;
+               } else {
+                       DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
                }
-
-               DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
        } else {
                DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
        }
 
        kfree(edid);
 
-       return false;
+       return ret;
 }
 
 static enum drm_connector_status
index 3dc8724df4004842c78bc985606da52a06e449c0..891c86aef99dfe1b939d5641d0b604bf9b3a49de 100644 (file)
@@ -2235,24 +2235,22 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
                        i915_vma_pin_fence(vma);
        }
 
+       i915_vma_get(vma);
 err:
        intel_runtime_pm_put(dev_priv);
        return vma;
 }
 
-void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
+void intel_unpin_fb_vma(struct i915_vma *vma)
 {
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-       struct i915_ggtt_view view;
-       struct i915_vma *vma;
+       lockdep_assert_held(&vma->vm->dev->struct_mutex);
 
-       WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
-
-       intel_fill_fb_ggtt_view(&view, fb, rotation);
-       vma = i915_gem_object_to_ggtt(obj, &view);
+       if (WARN_ON_ONCE(!vma))
+               return;
 
        i915_vma_unpin_fence(vma);
        i915_gem_object_unpin_from_display_plane(vma);
+       i915_vma_put(vma);
 }
 
 static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
@@ -2585,8 +2583,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
                         * We only keep the x/y offsets, so push all of the
                         * gtt offset into the x/y offsets.
                         */
-                       _intel_adjust_tile_offset(&x, &y, tile_size,
-                                                 tile_width, tile_height, pitch_tiles,
+                       _intel_adjust_tile_offset(&x, &y,
+                                                 tile_width, tile_height,
+                                                 tile_size, pitch_tiles,
                                                  gtt_offset_rotated * tile_size, 0);
 
                        gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
@@ -2746,7 +2745,6 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *c;
-       struct intel_crtc *i;
        struct drm_i915_gem_object *obj;
        struct drm_plane *primary = intel_crtc->base.primary;
        struct drm_plane_state *plane_state = primary->state;
@@ -2771,20 +2769,20 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
         * an fb with another CRTC instead
         */
        for_each_crtc(dev, c) {
-               i = to_intel_crtc(c);
+               struct intel_plane_state *state;
 
                if (c == &intel_crtc->base)
                        continue;
 
-               if (!i->active)
+               if (!to_intel_crtc(c)->active)
                        continue;
 
-               fb = c->primary->fb;
-               if (!fb)
+               state = to_intel_plane_state(c->primary->state);
+               if (!state->vma)
                        continue;
 
-               obj = intel_fb_obj(fb);
-               if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) {
+               if (intel_plane_ggtt_offset(state) == plane_config->base) {
+                       fb = c->primary->fb;
                        drm_framebuffer_reference(fb);
                        goto valid_fb;
                }
@@ -2805,6 +2803,19 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
        return;
 
 valid_fb:
+       mutex_lock(&dev->struct_mutex);
+       intel_state->vma =
+               intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
+       mutex_unlock(&dev->struct_mutex);
+       if (IS_ERR(intel_state->vma)) {
+               DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
+                         intel_crtc->pipe, PTR_ERR(intel_state->vma));
+
+               intel_state->vma = NULL;
+               drm_framebuffer_unreference(fb);
+               return;
+       }
+
        plane_state->src_x = 0;
        plane_state->src_y = 0;
        plane_state->src_w = fb->width << 16;
@@ -2967,6 +2978,9 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
        unsigned int rotation = plane_state->base.rotation;
        int ret;
 
+       if (!plane_state->base.visible)
+               return 0;
+
        /* Rotate src coordinates to match rotated GTT view */
        if (drm_rotation_90_or_270(rotation))
                drm_rect_rotate(&plane_state->base.src,
@@ -3097,13 +3111,13 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
        I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
        if (INTEL_GEN(dev_priv) >= 4) {
                I915_WRITE(DSPSURF(plane),
-                          intel_fb_gtt_offset(fb, rotation) +
+                          intel_plane_ggtt_offset(plane_state) +
                           intel_crtc->dspaddr_offset);
                I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
                I915_WRITE(DSPLINOFF(plane), linear_offset);
        } else {
                I915_WRITE(DSPADDR(plane),
-                          intel_fb_gtt_offset(fb, rotation) +
+                          intel_plane_ggtt_offset(plane_state) +
                           intel_crtc->dspaddr_offset);
        }
        POSTING_READ(reg);
@@ -3200,7 +3214,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
 
        I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
        I915_WRITE(DSPSURF(plane),
-                  intel_fb_gtt_offset(fb, rotation) +
+                  intel_plane_ggtt_offset(plane_state) +
                   intel_crtc->dspaddr_offset);
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
@@ -3223,23 +3237,6 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
        }
 }
 
-u32 intel_fb_gtt_offset(struct drm_framebuffer *fb,
-                       unsigned int rotation)
-{
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-       struct i915_ggtt_view view;
-       struct i915_vma *vma;
-
-       intel_fill_fb_ggtt_view(&view, fb, rotation);
-
-       vma = i915_gem_object_to_ggtt(obj, &view);
-       if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
-                view.type))
-               return -1;
-
-       return i915_ggtt_offset(vma);
-}
-
 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
 {
        struct drm_device *dev = intel_crtc->base.dev;
@@ -3434,7 +3431,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
        }
 
        I915_WRITE(PLANE_SURF(pipe, 0),
-                  intel_fb_gtt_offset(fb, rotation) + surf_addr);
+                  intel_plane_ggtt_offset(plane_state) + surf_addr);
 
        POSTING_READ(PLANE_SURF(pipe, 0));
 }
@@ -4265,10 +4262,10 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
        drm_crtc_vblank_put(&intel_crtc->base);
 
        wake_up_all(&dev_priv->pending_flip_queue);
-       queue_work(dev_priv->wq, &work->unpin_work);
-
        trace_i915_flip_complete(intel_crtc->plane,
                                 work->pending_flip_obj);
+
+       queue_work(dev_priv->wq, &work->unpin_work);
 }
 
 static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
@@ -6846,6 +6843,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
        }
 
        state = drm_atomic_state_alloc(crtc->dev);
+       if (!state) {
+               DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
+                             crtc->base.id, crtc->name);
+               return;
+       }
+
        state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
 
        /* Everything's already locked, -EDEADLK can't happen. */
@@ -11243,6 +11246,7 @@ found:
        }
 
        old->restore_state = restore_state;
+       drm_atomic_state_put(state);
 
        /* let the connector get through one full cycle before testing */
        intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
@@ -11522,7 +11526,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
                flush_work(&work->mmio_work);
 
        mutex_lock(&dev->struct_mutex);
-       intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
+       intel_unpin_fb_vma(work->old_vma);
        i915_gem_object_put(work->pending_flip_obj);
        mutex_unlock(&dev->struct_mutex);
 
@@ -12232,8 +12236,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                goto cleanup_pending;
        }
 
-       work->gtt_offset = intel_fb_gtt_offset(fb, primary->state->rotation);
-       work->gtt_offset += intel_crtc->dspaddr_offset;
+       work->old_vma = to_intel_plane_state(primary->state)->vma;
+       to_intel_plane_state(primary->state)->vma = vma;
+
+       work->gtt_offset = i915_ggtt_offset(vma) + intel_crtc->dspaddr_offset;
        work->rotation = crtc->primary->state->rotation;
 
        /*
@@ -12287,7 +12293,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 cleanup_request:
        i915_add_request_no_flush(request);
 cleanup_unpin:
-       intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
+       to_intel_plane_state(primary->state)->vma = work->old_vma;
+       intel_unpin_fb_vma(vma);
 cleanup_pending:
        atomic_dec(&intel_crtc->unpin_work_count);
 unlock:
@@ -14512,8 +14519,14 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence,
                break;
 
        case FENCE_FREE:
-               drm_atomic_state_put(&state->base);
-               break;
+               {
+                       struct intel_atomic_helper *helper =
+                               &to_i915(state->base.dev)->atomic_helper;
+
+                       if (llist_add(&state->freed, &helper->free_list))
+                               schedule_work(&helper->free_work);
+                       break;
+               }
        }
 
        return NOTIFY_DONE;
@@ -14774,6 +14787,8 @@ intel_prepare_plane_fb(struct drm_plane *plane,
                        DRM_DEBUG_KMS("failed to pin object\n");
                        return PTR_ERR(vma);
                }
+
+               to_intel_plane_state(new_state)->vma = vma;
        }
 
        return 0;
@@ -14792,19 +14807,12 @@ void
 intel_cleanup_plane_fb(struct drm_plane *plane,
                       struct drm_plane_state *old_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(plane->dev);
-       struct intel_plane_state *old_intel_state;
-       struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
-       struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
-
-       old_intel_state = to_intel_plane_state(old_state);
-
-       if (!obj && !old_obj)
-               return;
+       struct i915_vma *vma;
 
-       if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
-           !INTEL_INFO(dev_priv)->cursor_needs_physical))
-               intel_unpin_fb_obj(old_state->fb, old_state->rotation);
+       /* Should only be called after a successful intel_prepare_plane_fb()! */
+       vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma);
+       if (vma)
+               intel_unpin_fb_vma(vma);
 }
 
 int
@@ -15146,7 +15154,7 @@ intel_update_cursor_plane(struct drm_plane *plane,
        if (!obj)
                addr = 0;
        else if (!INTEL_INFO(dev_priv)->cursor_needs_physical)
-               addr = i915_gem_object_ggtt_offset(obj, NULL);
+               addr = intel_plane_ggtt_offset(state);
        else
                addr = obj->phys_handle->busaddr;
 
@@ -16392,6 +16400,18 @@ fail:
        drm_modeset_acquire_fini(&ctx);
 }
 
+static void intel_atomic_helper_free_state(struct work_struct *work)
+{
+       struct drm_i915_private *dev_priv =
+               container_of(work, typeof(*dev_priv), atomic_helper.free_work);
+       struct intel_atomic_state *state, *next;
+       struct llist_node *freed;
+
+       freed = llist_del_all(&dev_priv->atomic_helper.free_list);
+       llist_for_each_entry_safe(state, next, freed, freed)
+               drm_atomic_state_put(&state->base);
+}
+
 int intel_modeset_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -16411,6 +16431,9 @@ int intel_modeset_init(struct drm_device *dev)
 
        dev->mode_config.funcs = &intel_mode_funcs;
 
+       INIT_WORK(&dev_priv->atomic_helper.free_work,
+                 intel_atomic_helper_free_state);
+
        intel_init_quirks(dev);
 
        intel_init_pm(dev_priv);
@@ -17024,47 +17047,19 @@ void intel_display_resume(struct drm_device *dev)
 
        if (ret)
                DRM_ERROR("Restoring old state failed with %i\n", ret);
-       drm_atomic_state_put(state);
+       if (state)
+               drm_atomic_state_put(state);
 }
 
 void intel_modeset_gem_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_crtc *c;
-       struct drm_i915_gem_object *obj;
 
        intel_init_gt_powersave(dev_priv);
 
        intel_modeset_init_hw(dev);
 
        intel_setup_overlay(dev_priv);
-
-       /*
-        * Make sure any fbs we allocated at startup are properly
-        * pinned & fenced.  When we do the allocation it's too early
-        * for this.
-        */
-       for_each_crtc(dev, c) {
-               struct i915_vma *vma;
-
-               obj = intel_fb_obj(c->primary->fb);
-               if (obj == NULL)
-                       continue;
-
-               mutex_lock(&dev->struct_mutex);
-               vma = intel_pin_and_fence_fb_obj(c->primary->fb,
-                                                c->primary->state->rotation);
-               mutex_unlock(&dev->struct_mutex);
-               if (IS_ERR(vma)) {
-                       DRM_ERROR("failed to pin boot fb on pipe %d\n",
-                                 to_intel_crtc(c)->pipe);
-                       drm_framebuffer_unreference(c->primary->fb);
-                       c->primary->fb = NULL;
-                       c->primary->crtc = c->primary->state->crtc = NULL;
-                       update_state_fb(c->primary);
-                       c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
-               }
-       }
 }
 
 int intel_connector_register(struct drm_connector *connector)
@@ -17094,6 +17089,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
 
+       flush_work(&dev_priv->atomic_helper.free_work);
+       WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
+
        intel_disable_gt_powersave(dev_priv);
 
        /*
index 58a756f2f2244d054f80baba8ef9afb6892fc21e..a2f0e070d38d6c10001d34a8ae3969fe1e3d363c 100644 (file)
@@ -1730,7 +1730,8 @@ bxt_get_dpll(struct intel_crtc *crtc,
                return NULL;
 
        if ((encoder->type == INTEL_OUTPUT_DP ||
-            encoder->type == INTEL_OUTPUT_EDP) &&
+            encoder->type == INTEL_OUTPUT_EDP ||
+            encoder->type == INTEL_OUTPUT_DP_MST) &&
            !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
                return NULL;
 
index cd132c216a67decc86e4b1fbc22d544a8230b0ad..03a2112004f91e1d5ac011cabc255a36867e0e71 100644 (file)
@@ -370,11 +370,14 @@ struct intel_atomic_state {
        struct skl_wm_values wm_results;
 
        struct i915_sw_fence commit_ready;
+
+       struct llist_node freed;
 };
 
 struct intel_plane_state {
        struct drm_plane_state base;
        struct drm_rect clip;
+       struct i915_vma *vma;
 
        struct {
                u32 offset;
@@ -1044,6 +1047,7 @@ struct intel_flip_work {
        struct work_struct mmio_work;
 
        struct drm_crtc *crtc;
+       struct i915_vma *old_vma;
        struct drm_framebuffer *old_fb;
        struct drm_i915_gem_object *pending_flip_obj;
        struct drm_pending_vblank_event *event;
@@ -1271,7 +1275,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
                                    struct drm_modeset_acquire_ctx *ctx);
 struct i915_vma *
 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
-void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
+void intel_unpin_fb_vma(struct i915_vma *vma);
 struct drm_framebuffer *
 __intel_framebuffer_create(struct drm_device *dev,
                           struct drm_mode_fb_cmd2 *mode_cmd,
@@ -1360,7 +1364,10 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
 int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
 
-u32 intel_fb_gtt_offset(struct drm_framebuffer *fb, unsigned int rotation);
+static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
+{
+       return i915_ggtt_offset(state->vma);
+}
 
 u32 skl_plane_ctl_format(uint32_t pixel_format);
 u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
index 62f215b12eb5274b8251d3f46d2a4fdbfc590e96..f3a1d6a5cabe9fcf76f5812dc526781b678f7e41 100644 (file)
@@ -173,7 +173,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
        if (IS_I945GM(dev_priv))
                fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
        fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
-       fbc_ctl |= params->fb.fence_reg;
+       fbc_ctl |= params->vma->fence->id;
        I915_WRITE(FBC_CONTROL, fbc_ctl);
 }
 
@@ -193,8 +193,8 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
        else
                dpfc_ctl |= DPFC_CTL_LIMIT_1X;
 
-       if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
-               dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg;
+       if (params->vma->fence) {
+               dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
                I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
        } else {
                I915_WRITE(DPFC_FENCE_YOFF, 0);
@@ -251,13 +251,14 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
                break;
        }
 
-       if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
+       if (params->vma->fence) {
                dpfc_ctl |= DPFC_CTL_FENCE_EN;
                if (IS_GEN5(dev_priv))
-                       dpfc_ctl |= params->fb.fence_reg;
+                       dpfc_ctl |= params->vma->fence->id;
                if (IS_GEN6(dev_priv)) {
                        I915_WRITE(SNB_DPFC_CTL_SA,
-                                  SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+                                  SNB_CPU_FENCE_ENABLE |
+                                  params->vma->fence->id);
                        I915_WRITE(DPFC_CPU_FENCE_OFFSET,
                                   params->crtc.fence_y_offset);
                }
@@ -269,7 +270,8 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
        }
 
        I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
-       I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID);
+       I915_WRITE(ILK_FBC_RT_BASE,
+                  i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);
        /* enable it... */
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
@@ -319,10 +321,11 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
                break;
        }
 
-       if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
+       if (params->vma->fence) {
                dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
                I915_WRITE(SNB_DPFC_CTL_SA,
-                          SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+                          SNB_CPU_FENCE_ENABLE |
+                          params->vma->fence->id);
                I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
        } else {
                I915_WRITE(SNB_DPFC_CTL_SA,0);
@@ -727,14 +730,6 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
        return effective_w <= max_w && effective_h <= max_h;
 }
 
-/* XXX replace me when we have VMA tracking for intel_plane_state */
-static int get_fence_id(struct drm_framebuffer *fb)
-{
-       struct i915_vma *vma = i915_gem_object_to_ggtt(intel_fb_obj(fb), NULL);
-
-       return vma && vma->fence ? vma->fence->id : I915_FENCE_REG_NONE;
-}
-
 static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
                                         struct intel_crtc_state *crtc_state,
                                         struct intel_plane_state *plane_state)
@@ -743,7 +738,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_fbc_state_cache *cache = &fbc->state_cache;
        struct drm_framebuffer *fb = plane_state->base.fb;
-       struct drm_i915_gem_object *obj;
+
+       cache->vma = NULL;
 
        cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
@@ -758,16 +754,10 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
        if (!cache->plane.visible)
                return;
 
-       obj = intel_fb_obj(fb);
-
-       /* FIXME: We lack the proper locking here, so only run this on the
-        * platforms that need. */
-       if (IS_GEN(dev_priv, 5, 6))
-               cache->fb.ilk_ggtt_offset = i915_gem_object_ggtt_offset(obj, NULL);
        cache->fb.pixel_format = fb->pixel_format;
        cache->fb.stride = fb->pitches[0];
-       cache->fb.fence_reg = get_fence_id(fb);
-       cache->fb.tiling_mode = i915_gem_object_get_tiling(obj);
+
+       cache->vma = plane_state->vma;
 }
 
 static bool intel_fbc_can_activate(struct intel_crtc *crtc)
@@ -784,7 +774,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
                return false;
        }
 
-       if (!cache->plane.visible) {
+       if (!cache->vma) {
                fbc->no_fbc_reason = "primary plane not visible";
                return false;
        }
@@ -807,8 +797,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
         * so have no fence associated with it) due to aperture constaints
         * at the time of pinning.
         */
-       if (cache->fb.tiling_mode != I915_TILING_X ||
-           cache->fb.fence_reg == I915_FENCE_REG_NONE) {
+       if (!cache->vma->fence) {
                fbc->no_fbc_reason = "framebuffer not tiled or fenced";
                return false;
        }
@@ -888,17 +877,16 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
         * zero. */
        memset(params, 0, sizeof(*params));
 
+       params->vma = cache->vma;
+
        params->crtc.pipe = crtc->pipe;
        params->crtc.plane = crtc->plane;
        params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);
 
        params->fb.pixel_format = cache->fb.pixel_format;
        params->fb.stride = cache->fb.stride;
-       params->fb.fence_reg = cache->fb.fence_reg;
 
        params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
-
-       params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset;
 }
 
 static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
index beb08982dc0b5c3ea9711de7220c383e6ea41b92..f4a8c4fc57c4e654a1af91903275189c841b35bd 100644 (file)
@@ -284,7 +284,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 out_destroy_fbi:
        drm_fb_helper_release_fbi(helper);
 out_unpin:
-       intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
+       intel_unpin_fb_vma(vma);
 out_unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
@@ -549,7 +549,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
 
        if (ifbdev->fb) {
                mutex_lock(&ifbdev->helper.dev->struct_mutex);
-               intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
+               intel_unpin_fb_vma(ifbdev->vma);
                mutex_unlock(&ifbdev->helper.dev->struct_mutex);
 
                drm_framebuffer_remove(&ifbdev->fb->base);
@@ -742,6 +742,9 @@ void intel_fbdev_initial_config_async(struct drm_device *dev)
 {
        struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
 
+       if (!ifbdev)
+               return;
+
        ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
 }
 
index d4961fa20c73d0e2d390673889ae5fa82f04dd07..beabc17e7c8af1a08e7dff250186b32e49f717a5 100644 (file)
@@ -979,18 +979,8 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
                                                uint32_t *batch,
                                                uint32_t index)
 {
-       struct drm_i915_private *dev_priv = engine->i915;
        uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
 
-       /*
-        * WaDisableLSQCROPERFforOCL:kbl
-        * This WA is implemented in skl_init_clock_gating() but since
-        * this batch updates GEN8_L3SQCREG4 with default value we need to
-        * set this bit here to retain the WA during flush.
-        */
-       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
-               l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
-
        wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
                                   MI_SRM_LRM_GLOBAL_GTT));
        wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
index aeb637dc1fdf490caeb87f7f1938357916082ab8..91cb4c422ad5d52a5d7475e408dff744feba42d4 100644 (file)
@@ -1095,14 +1095,6 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
                WA_SET_BIT_MASKED(HDC_CHICKEN0,
                                  HDC_FENCE_DEST_SLM_DISABLE);
 
-       /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
-        * involving this register should also be added to WA batch as required.
-        */
-       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
-               /* WaDisableLSQCROPERFforOCL:kbl */
-               I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
-                          GEN8_LQSC_RO_PERF_DIS);
-
        /* WaToEnableHwFixForPushConstHWBug:kbl */
        if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
                WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
index 8f131a08d440cf02cbcd9ffe255b94e3f4eb6077..242a73e66d82862bea3ba881474efe327cd02d6e 100644 (file)
@@ -273,7 +273,7 @@ skl_update_plane(struct drm_plane *drm_plane,
 
        I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
        I915_WRITE(PLANE_SURF(pipe, plane),
-                  intel_fb_gtt_offset(fb, rotation) + surf_addr);
+                  intel_plane_ggtt_offset(plane_state) + surf_addr);
        POSTING_READ(PLANE_SURF(pipe, plane));
 }
 
@@ -458,7 +458,7 @@ vlv_update_plane(struct drm_plane *dplane,
        I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
        I915_WRITE(SPCNTR(pipe, plane), sprctl);
        I915_WRITE(SPSURF(pipe, plane),
-                  intel_fb_gtt_offset(fb, rotation) + sprsurf_offset);
+                  intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
        POSTING_READ(SPSURF(pipe, plane));
 }
 
@@ -594,7 +594,7 @@ ivb_update_plane(struct drm_plane *plane,
                I915_WRITE(SPRSCALE(pipe), sprscale);
        I915_WRITE(SPRCTL(pipe), sprctl);
        I915_WRITE(SPRSURF(pipe),
-                  intel_fb_gtt_offset(fb, rotation) + sprsurf_offset);
+                  intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
        POSTING_READ(SPRSURF(pipe));
 }
 
@@ -721,7 +721,7 @@ ilk_update_plane(struct drm_plane *plane,
        I915_WRITE(DVSSCALE(pipe), dvsscale);
        I915_WRITE(DVSCNTR(pipe), dvscntr);
        I915_WRITE(DVSSURF(pipe),
-                  intel_fb_gtt_offset(fb, rotation) + dvssurf_offset);
+                  intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
        POSTING_READ(DVSSURF(pipe));
 }
 
index 14ff87686a36ffb2580353bde1f0126964c2f8dc..686a580c711a99bfe61b5867e5adc75891e5e0a0 100644 (file)
@@ -345,7 +345,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 {
        struct adreno_platform_config *config = pdev->dev.platform_data;
        struct msm_gpu *gpu = &adreno_gpu->base;
-       struct msm_mmu *mmu;
        int ret;
 
        adreno_gpu->funcs = funcs;
@@ -385,8 +384,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                return ret;
        }
 
-       mmu = gpu->aspace->mmu;
-       if (mmu) {
+       if (gpu->aspace && gpu->aspace->mmu) {
+               struct msm_mmu *mmu = gpu->aspace->mmu;
                ret = mmu->funcs->attach(mmu, iommu_ports,
                                ARRAY_SIZE(iommu_ports));
                if (ret)
index 5f6cd8745dbce78d7fec38bb6fc4b37e89c57457..c396d459a9d062769471fdc84d01120c8d1e8525 100644 (file)
@@ -119,13 +119,7 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st
 
 static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 {
-       int i;
        struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
-       struct drm_plane *plane;
-       struct drm_plane_state *plane_state;
-
-       for_each_plane_in_state(state, plane, plane_state, i)
-               mdp5_plane_complete_commit(plane, plane_state);
 
        if (mdp5_kms->smp)
                mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
index 17b0cc10117109bbc25a27de6a5e74d9b6f6c479..cdfc63d90c7b4bf4b7f1f116b410c347560663ef 100644 (file)
@@ -104,8 +104,6 @@ struct mdp5_plane_state {
 
        /* assigned by crtc blender */
        enum mdp_mixer_stage_id stage;
-
-       bool pending : 1;
 };
 #define to_mdp5_plane_state(x) \
                container_of(x, struct mdp5_plane_state, base)
@@ -232,8 +230,6 @@ int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
 void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
 
 uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
-void mdp5_plane_complete_commit(struct drm_plane *plane,
-       struct drm_plane_state *state);
 enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
 struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary);
 
index c099da7bc212d52f9bdf2d42f50da98e359cf53f..25d9d0a97156765918643b7cdd440b6108d3c5ed 100644 (file)
@@ -179,7 +179,6 @@ mdp5_plane_atomic_print_state(struct drm_printer *p,
        drm_printf(p, "\tzpos=%u\n", pstate->zpos);
        drm_printf(p, "\talpha=%u\n", pstate->alpha);
        drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage));
-       drm_printf(p, "\tpending=%u\n", pstate->pending);
 }
 
 static void mdp5_plane_reset(struct drm_plane *plane)
@@ -220,8 +219,6 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
        if (mdp5_state && mdp5_state->base.fb)
                drm_framebuffer_reference(mdp5_state->base.fb);
 
-       mdp5_state->pending = false;
-
        return &mdp5_state->base;
 }
 
@@ -288,13 +285,6 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
        DBG("%s: check (%d -> %d)", plane->name,
                        plane_enabled(old_state), plane_enabled(state));
 
-       /* We don't allow faster-than-vblank updates.. if we did add this
-        * some day, we would need to disallow in cases where hwpipe
-        * changes
-        */
-       if (WARN_ON(to_mdp5_plane_state(old_state)->pending))
-               return -EBUSY;
-
        max_width = config->hw->lm.max_width << 16;
        max_height = config->hw->lm.max_height << 16;
 
@@ -370,12 +360,9 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
                                     struct drm_plane_state *old_state)
 {
        struct drm_plane_state *state = plane->state;
-       struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
 
        DBG("%s: update", plane->name);
 
-       mdp5_state->pending = true;
-
        if (plane_enabled(state)) {
                int ret;
 
@@ -851,15 +838,6 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
        return pstate->hwpipe->flush_mask;
 }
 
-/* called after vsync in thread context */
-void mdp5_plane_complete_commit(struct drm_plane *plane,
-       struct drm_plane_state *state)
-{
-       struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
-
-       pstate->pending = false;
-}
-
 /* initialize plane */
 struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary)
 {
index d8bc59c7e26142b377623a6736c575b30f6eeea2..1974ccb781deb85348bce9dbc015d7839bc45600 100644 (file)
@@ -294,6 +294,8 @@ put_iova(struct drm_gem_object *obj)
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
        for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
+               if (!priv->aspace[id])
+                       continue;
                msm_gem_unmap_vma(priv->aspace[id],
                                &msm_obj->domain[id], msm_obj->sgt);
        }
@@ -640,7 +642,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 
        seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
                        msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
-                       obj->name, obj->refcount.refcount.counter,
+                       obj->name, kref_read(&obj->refcount),
                        off, msm_obj->vaddr);
 
        for (id = 0; id < priv->num_aspaces; id++)
index 74856a8b8f35943b08a59f8aed5a546e98058d3c..e64f52464ecf55b83a17f25b434cb9e1474c10e1 100644 (file)
@@ -222,6 +222,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
                uint32_t mpllP;
 
                pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
+               mpllP = (mpllP >> 8) & 0xf;
                if (!mpllP)
                        mpllP = 4;
 
@@ -232,7 +233,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
                uint32_t clock;
 
                pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
-               return clock;
+               return clock / 1000;
        }
 
        ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
index cef08da1da4e0bcb6a22fa73033562cdeea55006..6a157763dfc38f672f2b9384792679faf83b95d4 100644 (file)
@@ -411,7 +411,8 @@ nouveau_display_init(struct drm_device *dev)
                return ret;
 
        /* enable polling for external displays */
-       drm_kms_helper_poll_enable(dev);
+       if (!dev->mode_config.poll_enabled)
+               drm_kms_helper_poll_enable(dev);
 
        /* enable hotplug interrupts */
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
index 59348fc41c77b0edf725e01655fe3ef1722a4342..bc85a45f91cde756d9763e34108ab53c711b9bd4 100644 (file)
@@ -773,7 +773,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
        pci_set_master(pdev);
 
        ret = nouveau_do_resume(drm_dev, true);
-       drm_kms_helper_poll_enable(drm_dev);
+
+       if (!drm_dev->mode_config.poll_enabled)
+               drm_kms_helper_poll_enable(drm_dev);
+
        /* do magic */
        nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
        vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
index 8d5ed5bfdacb1d6c59e9db9df47a74be41462a9a..42c1fa53d4314f195b637c3de4323c86b095ae0f 100644 (file)
@@ -165,6 +165,8 @@ struct nouveau_drm {
        struct backlight_device *backlight;
        struct list_head bl_connectors;
        struct work_struct hpd_work;
+       struct work_struct fbcon_work;
+       int fbcon_new_state;
 #ifdef CONFIG_ACPI
        struct notifier_block acpi_nb;
 #endif
index 2f2a3dcd4ad777addbdcbfbb8cdc14922c7082eb..fa2d0a978cccbaac7a5640b5b6911779b6e8328f 100644 (file)
@@ -470,19 +470,43 @@ static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
        .fb_probe = nouveau_fbcon_create,
 };
 
+static void
+nouveau_fbcon_set_suspend_work(struct work_struct *work)
+{
+       struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work);
+       int state = READ_ONCE(drm->fbcon_new_state);
+
+       if (state == FBINFO_STATE_RUNNING)
+               pm_runtime_get_sync(drm->dev->dev);
+
+       console_lock();
+       if (state == FBINFO_STATE_RUNNING)
+               nouveau_fbcon_accel_restore(drm->dev);
+       drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
+       if (state != FBINFO_STATE_RUNNING)
+               nouveau_fbcon_accel_save_disable(drm->dev);
+       console_unlock();
+
+       if (state == FBINFO_STATE_RUNNING) {
+               pm_runtime_mark_last_busy(drm->dev->dev);
+               pm_runtime_put_sync(drm->dev->dev);
+       }
+}
+
 void
 nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
-       if (drm->fbcon) {
-               console_lock();
-               if (state == FBINFO_STATE_RUNNING)
-                       nouveau_fbcon_accel_restore(dev);
-               drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
-               if (state != FBINFO_STATE_RUNNING)
-                       nouveau_fbcon_accel_save_disable(dev);
-               console_unlock();
-       }
+
+       if (!drm->fbcon)
+               return;
+
+       drm->fbcon_new_state = state;
+       /* Since runtime resume can happen as a result of a sysfs operation,
+        * it's possible we already have the console locked. So handle fbcon
+        * init/deinit from a seperate work thread
+        */
+       schedule_work(&drm->fbcon_work);
 }
 
 int
@@ -502,6 +526,7 @@ nouveau_fbcon_init(struct drm_device *dev)
                return -ENOMEM;
 
        drm->fbcon = fbcon;
+       INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
 
        drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
 
index a6126c93f215b0118e2c152cd0ad18d87c6f62ea..88ee60d1b907012cb2ae1ccc7dad3e89f9233a02 100644 (file)
@@ -527,7 +527,7 @@ static bool nouveau_fence_no_signaling(struct dma_fence *f)
         * caller should have a reference on the fence,
         * else fence could get freed here
         */
-       WARN_ON(atomic_read(&fence->base.refcount.refcount) <= 1);
+       WARN_ON(kref_read(&fence->base.refcount) <= 1);
 
        /*
         * This needs uevents to work correctly, but dma_fence_add_callback relies on
index ccdce1b4eec4b8bf183235ebae2eb5395a306430..d5e58a38f160182354b8c9a126bdcbb5d459f40a 100644 (file)
@@ -99,6 +99,7 @@ struct nv84_fence_priv {
        struct nouveau_bo *bo;
        struct nouveau_bo *bo_gart;
        u32 *suspend;
+       struct mutex mutex;
 };
 
 int  nv84_fence_context_new(struct nouveau_channel *);
index 187ecdb8200273baa77c41a42fbc65c3bcb93db6..21a5775028cc612e9a6c81e280777329f18233fd 100644 (file)
@@ -42,7 +42,7 @@ nouveau_led(struct drm_device *dev)
 }
 
 /* nouveau_led.c */
-#if IS_ENABLED(CONFIG_LEDS_CLASS)
+#if IS_REACHABLE(CONFIG_LEDS_CLASS)
 int  nouveau_led_init(struct drm_device *dev);
 void nouveau_led_suspend(struct drm_device *dev);
 void nouveau_led_resume(struct drm_device *dev);
index 08f9c6fa0f7f210d3e3fd5a0fbe8f11ff40b1972..1fba3862274474f0001deaec9fefaf6b0fd324b4 100644 (file)
@@ -313,7 +313,8 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
        if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) {
                /* block access to objects not created via this interface */
                owner = argv->v0.owner;
-               if (argv->v0.object == 0ULL)
+               if (argv->v0.object == 0ULL &&
+                   argv->v0.type != NVIF_IOCTL_V0_DEL)
                        argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */
                else
                        argv->v0.owner = NVDRM_OBJECT_USIF;
index 2c2c645076614b4f9c187d24a5e9d2667ea27778..32097fd615fd1e3a5954019c02dec36bd2e8db8c 100644 (file)
@@ -4052,6 +4052,11 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
                }
        }
 
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               if (crtc->state->event)
+                       drm_crtc_vblank_get(crtc);
+       }
+
        /* Update plane(s). */
        for_each_plane_in_state(state, plane, plane_state, i) {
                struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state);
@@ -4101,6 +4106,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
                        drm_crtc_send_vblank_event(crtc, crtc->state->event);
                        spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
                        crtc->state->event = NULL;
+                       drm_crtc_vblank_put(crtc);
                }
        }
 
index 52b87ae83e7b4d0df54e003d58783eddd8deb6f5..f0b322bec7df22de23bbae372333fb484ac6d9ed 100644 (file)
@@ -107,8 +107,10 @@ nv84_fence_context_del(struct nouveau_channel *chan)
        struct nv84_fence_chan *fctx = chan->fence;
 
        nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
+       mutex_lock(&priv->mutex);
        nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
        nouveau_bo_vma_del(priv->bo, &fctx->vma);
+       mutex_unlock(&priv->mutex);
        nouveau_fence_context_del(&fctx->base);
        chan->fence = NULL;
        nouveau_fence_context_free(&fctx->base);
@@ -134,11 +136,13 @@ nv84_fence_context_new(struct nouveau_channel *chan)
        fctx->base.sync32 = nv84_fence_sync32;
        fctx->base.sequence = nv84_fence_read(chan);
 
+       mutex_lock(&priv->mutex);
        ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
        if (ret == 0) {
                ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
                                        &fctx->vma_gart);
        }
+       mutex_unlock(&priv->mutex);
 
        if (ret)
                nv84_fence_context_del(chan);
@@ -212,6 +216,8 @@ nv84_fence_create(struct nouveau_drm *drm)
        priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
        priv->base.uevent = true;
 
+       mutex_init(&priv->mutex);
+
        /* Use VRAM if there is any ; otherwise fallback to system memory */
        domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
                         /*
index 6f0436df021953337ba29f0ff9c02c507214b07b..f8f2f16c22a2a2502bf283c63a5d1fc124d0ff89 100644 (file)
@@ -59,7 +59,7 @@ gt215_hda_eld(NV50_DISP_MTHD_V1)
                        );
                }
                for (i = 0; i < size; i++)
-                       nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[0]);
+                       nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[i]);
                for (; i < 0x60; i++)
                        nvkm_wr32(device, 0x61c440 + soff, (i << 8));
                nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003);
index 567466f93cd5d9645020e3b89c282808d88397e4..0db8efbf1c2e2e9cd84b689098bdba9d666843bc 100644 (file)
@@ -433,8 +433,6 @@ nv50_disp_dptmds_war(struct nvkm_device *device)
        case 0x94:
        case 0x96:
        case 0x98:
-       case 0xaa:
-       case 0xac:
                return true;
        default:
                break;
index 4a90c690f09e4e7b7bb8645ad4e09b2d68cbd0e5..74a9968df421dfdc888443b0c2fd387f8f53dd1d 100644 (file)
@@ -1033,7 +1033,7 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
        off = drm_vma_node_start(&obj->vma_node);
 
        seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
-                       omap_obj->flags, obj->name, obj->refcount.refcount.counter,
+                       omap_obj->flags, obj->name, kref_read(&obj->refcount),
                        off, &omap_obj->paddr, omap_obj->paddr_cnt,
                        omap_obj->vaddr, omap_obj->roll);
 
index fb16070b266e3f2de51243ba478f57d8fd956420..4a4f9533c53b888ab10fb0f4839fc4a5df852bd8 100644 (file)
@@ -205,8 +205,8 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
        }
 
        if (x <= (crtc->x - w) || y <= (crtc->y - radeon_crtc->cursor_height) ||
-           x >= (crtc->x + crtc->mode.crtc_hdisplay) ||
-           y >= (crtc->y + crtc->mode.crtc_vdisplay))
+           x >= (crtc->x + crtc->mode.hdisplay) ||
+           y >= (crtc->y + crtc->mode.vdisplay))
                goto out_of_bounds;
 
        x += xorigin;
index 00ea0002b539b9e9b5b0a063f62deb3b7638fd56..30bd4a6a9d466e11755bf95cee6929473ae65adf 100644 (file)
  *   2.46.0 - Add PFP_SYNC_ME support on evergreen
  *   2.47.0 - Add UVD_NO_OP register support
  *   2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI
+ *   2.49.0 - DRM_RADEON_GEM_INFO ioctl returns correct vram_size/visible values
  */
 #define KMS_DRIVER_MAJOR       2
-#define KMS_DRIVER_MINOR       48
+#define KMS_DRIVER_MINOR       49
 #define KMS_DRIVER_PATCHLEVEL  0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
@@ -366,11 +367,10 @@ static void
 radeon_pci_shutdown(struct pci_dev *pdev)
 {
        /* if we are running in a VM, make sure the device
-        * torn down properly on reboot/shutdown.
-        * unfortunately we can't detect certain
-        * hypervisors so just do this all the time.
+        * torn down properly on reboot/shutdown
         */
-       radeon_pci_remove(pdev);
+       if (radeon_device_is_virtual())
+               radeon_pci_remove(pdev);
 }
 
 static int radeon_pmops_suspend(struct device *dev)
index 0bcffd8a7bd3ceac0de37cd0b44344011963be77..96683f5b2b1b722db08de97550aa8cdf99444a1c 100644 (file)
@@ -220,8 +220,8 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
 
        man = &rdev->mman.bdev.man[TTM_PL_VRAM];
 
-       args->vram_size = rdev->mc.real_vram_size;
-       args->vram_visible = (u64)man->size << PAGE_SHIFT;
+       args->vram_size = (u64)man->size << PAGE_SHIFT;
+       args->vram_visible = rdev->mc.visible_vram_size;
        args->vram_visible -= rdev->vram_pin_size;
        args->gart_size = rdev->mc.gtt_size;
        args->gart_size -= rdev->gart_pin_size;
index e8a38d29685547a69fb1d657e833cd9cf5637cff..414776811e71e0a4e9f7c97309b6c173c8f502a3 100644 (file)
@@ -114,6 +114,9 @@ MODULE_FIRMWARE("radeon/hainan_mc.bin");
 MODULE_FIRMWARE("radeon/hainan_rlc.bin");
 MODULE_FIRMWARE("radeon/hainan_smc.bin");
 MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
+MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
+
+MODULE_FIRMWARE("radeon/si58_mc.bin");
 
 static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
 static void si_pcie_gen3_enable(struct radeon_device *rdev);
@@ -1650,6 +1653,8 @@ static int si_init_microcode(struct radeon_device *rdev)
        int err;
        int new_fw = 0;
        bool new_smc = false;
+       bool si58_fw = false;
+       bool banks2_fw = false;
 
        DRM_DEBUG("\n");
 
@@ -1727,10 +1732,11 @@ static int si_init_microcode(struct radeon_device *rdev)
                     ((rdev->pdev->device == 0x6660) ||
                      (rdev->pdev->device == 0x6663) ||
                      (rdev->pdev->device == 0x6665) ||
-                     (rdev->pdev->device == 0x6667))) ||
-                   ((rdev->pdev->revision == 0xc3) &&
-                    (rdev->pdev->device == 0x6665)))
+                     (rdev->pdev->device == 0x6667))))
                        new_smc = true;
+               else if ((rdev->pdev->revision == 0xc3) &&
+                        (rdev->pdev->device == 0x6665))
+                       banks2_fw = true;
                new_chip_name = "hainan";
                pfp_req_size = SI_PFP_UCODE_SIZE * 4;
                me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1742,6 +1748,10 @@ static int si_init_microcode(struct radeon_device *rdev)
        default: BUG();
        }
 
+       /* this memory configuration requires special firmware */
+       if (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
+               si58_fw = true;
+
        DRM_INFO("Loading %s Microcode\n", new_chip_name);
 
        snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
@@ -1845,7 +1855,10 @@ static int si_init_microcode(struct radeon_device *rdev)
                }
        }
 
-       snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
+       if (si58_fw)
+               snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
+       else
+               snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
        err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
        if (err) {
                snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
@@ -1876,7 +1889,9 @@ static int si_init_microcode(struct radeon_device *rdev)
                }
        }
 
-       if (new_smc)
+       if (banks2_fw)
+               snprintf(fw_name, sizeof(fw_name), "radeon/banks_k_2_smc.bin");
+       else if (new_smc)
                snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name);
        else
                snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
index 13ba73fd9b68849bd34d27207eb282eb6ff62793..2944916f7102ae0395d11157f366c199d398076d 100644 (file)
@@ -3008,17 +3008,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
                    (rdev->pdev->device == 0x6817) ||
                    (rdev->pdev->device == 0x6806))
                        max_mclk = 120000;
-       } else if (rdev->family == CHIP_OLAND) {
-               if ((rdev->pdev->revision == 0xC7) ||
-                   (rdev->pdev->revision == 0x80) ||
-                   (rdev->pdev->revision == 0x81) ||
-                   (rdev->pdev->revision == 0x83) ||
-                   (rdev->pdev->revision == 0x87) ||
-                   (rdev->pdev->device == 0x6604) ||
-                   (rdev->pdev->device == 0x6605)) {
-                       max_sclk = 75000;
-                       max_mclk = 80000;
-               }
        } else if (rdev->family == CHIP_HAINAN) {
                if ((rdev->pdev->revision == 0x81) ||
                    (rdev->pdev->revision == 0x83) ||
@@ -3027,7 +3016,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
                    (rdev->pdev->device == 0x6665) ||
                    (rdev->pdev->device == 0x6667)) {
                        max_sclk = 75000;
-                       max_mclk = 80000;
                }
        }
        /* Apply dpm quirks */
index d5063618efa773757ff1608f3e0caf535ef2cc85..ffc6cb55c78c149a1c53314847e2cf8076050a29 100644 (file)
@@ -140,8 +140,8 @@ static void ttm_bo_release_list(struct kref *list_kref)
        struct ttm_bo_device *bdev = bo->bdev;
        size_t acc_size = bo->acc_size;
 
-       BUG_ON(atomic_read(&bo->list_kref.refcount));
-       BUG_ON(atomic_read(&bo->kref.refcount));
+       BUG_ON(kref_read(&bo->list_kref));
+       BUG_ON(kref_read(&bo->kref));
        BUG_ON(atomic_read(&bo->cpu_writers));
        BUG_ON(bo->mem.mm_node != NULL);
        BUG_ON(!list_empty(&bo->lru));
@@ -181,61 +181,46 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 }
 EXPORT_SYMBOL(ttm_bo_add_to_lru);
 
-int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+static void ttm_bo_ref_bug(struct kref *list_kref)
+{
+       BUG();
+}
+
+void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 {
        struct ttm_bo_device *bdev = bo->bdev;
-       int put_count = 0;
 
        if (bdev->driver->lru_removal)
                bdev->driver->lru_removal(bo);
 
        if (!list_empty(&bo->swap)) {
                list_del_init(&bo->swap);
-               ++put_count;
+               kref_put(&bo->list_kref, ttm_bo_ref_bug);
        }
        if (!list_empty(&bo->lru)) {
                list_del_init(&bo->lru);
-               ++put_count;
+               kref_put(&bo->list_kref, ttm_bo_ref_bug);
        }
-
-       return put_count;
-}
-
-static void ttm_bo_ref_bug(struct kref *list_kref)
-{
-       BUG();
-}
-
-void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
-                        bool never_free)
-{
-       kref_sub(&bo->list_kref, count,
-                (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
 }
 
 void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
 {
-       int put_count;
-
        spin_lock(&bo->glob->lru_lock);
-       put_count = ttm_bo_del_from_lru(bo);
+       ttm_bo_del_from_lru(bo);
        spin_unlock(&bo->glob->lru_lock);
-       ttm_bo_list_ref_sub(bo, put_count, true);
 }
 EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
 
 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
 {
        struct ttm_bo_device *bdev = bo->bdev;
-       int put_count = 0;
 
        lockdep_assert_held(&bo->resv->lock.base);
 
        if (bdev->driver->lru_removal)
                bdev->driver->lru_removal(bo);
 
-       put_count = ttm_bo_del_from_lru(bo);
-       ttm_bo_list_ref_sub(bo, put_count, true);
+       ttm_bo_del_from_lru(bo);
        ttm_bo_add_to_lru(bo);
 }
 EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
@@ -447,7 +432,6 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 {
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
-       int put_count;
        int ret;
 
        spin_lock(&glob->lru_lock);
@@ -455,13 +439,10 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 
        if (!ret) {
                if (!ttm_bo_wait(bo, false, true)) {
-                       put_count = ttm_bo_del_from_lru(bo);
-
+                       ttm_bo_del_from_lru(bo);
                        spin_unlock(&glob->lru_lock);
                        ttm_bo_cleanup_memtype_use(bo);
 
-                       ttm_bo_list_ref_sub(bo, put_count, true);
-
                        return;
                } else
                        ttm_bo_flush_all_fences(bo);
@@ -504,7 +485,6 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
                                          bool no_wait_gpu)
 {
        struct ttm_bo_global *glob = bo->glob;
-       int put_count;
        int ret;
 
        ret = ttm_bo_wait(bo, false, true);
@@ -554,15 +534,13 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
                return ret;
        }
 
-       put_count = ttm_bo_del_from_lru(bo);
+       ttm_bo_del_from_lru(bo);
        list_del_init(&bo->ddestroy);
-       ++put_count;
+       kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
        spin_unlock(&glob->lru_lock);
        ttm_bo_cleanup_memtype_use(bo);
 
-       ttm_bo_list_ref_sub(bo, put_count, true);
-
        return 0;
 }
 
@@ -740,7 +718,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct ttm_buffer_object *bo;
-       int ret = -EBUSY, put_count;
+       int ret = -EBUSY;
 
        spin_lock(&glob->lru_lock);
        list_for_each_entry(bo, &man->lru, lru) {
@@ -771,13 +749,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
                return ret;
        }
 
-       put_count = ttm_bo_del_from_lru(bo);
+       ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);
 
        BUG_ON(ret != 0);
 
-       ttm_bo_list_ref_sub(bo, put_count, true);
-
        ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
        ttm_bo_unreserve(bo);
 
@@ -1669,7 +1645,6 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
            container_of(shrink, struct ttm_bo_global, shrink);
        struct ttm_buffer_object *bo;
        int ret = -EBUSY;
-       int put_count;
        uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
 
        spin_lock(&glob->lru_lock);
@@ -1692,11 +1667,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
                return ret;
        }
 
-       put_count = ttm_bo_del_from_lru(bo);
+       ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);
 
-       ttm_bo_list_ref_sub(bo, put_count, true);
-
        /**
         * Move to system cached
         */
index d35bc491e8debe3c5f90f6c9af3e1588c6bfab70..5e1bcabffef56abb33262fe8ea94efa4416c9612 100644 (file)
@@ -48,9 +48,7 @@ static void ttm_eu_del_from_lru_locked(struct list_head *list)
 
        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;
-               unsigned put_count = ttm_bo_del_from_lru(bo);
-
-               ttm_bo_list_ref_sub(bo, put_count, true);
+               ttm_bo_del_from_lru(bo);
        }
 }
 
index 4f5fa8d65fe932a7e10803d10d0e0fc3090a7d59..fdb451e3ec01184a4642e6facd6ddf8e5f0cad47 100644 (file)
@@ -304,7 +304,7 @@ bool ttm_ref_object_exists(struct ttm_object_file *tfile,
         * Verify that the ref->obj pointer was actually valid!
         */
        rmb();
-       if (unlikely(atomic_read(&ref->kref.refcount) == 0))
+       if (unlikely(kref_read(&ref->kref) == 0))
                goto out_false;
 
        rcu_read_unlock();
index a0fd3e66bc4b39c1f5092c340ad2c705bbf431f3..7aadce1f7e7a0e56b302bded53ad8c0d5977cc22 100644 (file)
@@ -839,7 +839,7 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc,
 
        }
 
-       __drm_atomic_helper_crtc_destroy_state(state);
+       drm_atomic_helper_crtc_destroy_state(crtc, state);
 }
 
 static const struct drm_crtc_funcs vc4_crtc_funcs = {
index db920771bfb5641c9d5d8e53a8f73cf7ee501bb7..ab3016982466c3ca35ba479050ee107d26eb50ac 100644 (file)
@@ -594,12 +594,14 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
                                          args->shader_rec_count);
        struct vc4_bo *bo;
 
-       if (uniforms_offset < shader_rec_offset ||
+       if (shader_rec_offset < args->bin_cl_size ||
+           uniforms_offset < shader_rec_offset ||
            exec_size < uniforms_offset ||
            args->shader_rec_count >= (UINT_MAX /
                                          sizeof(struct vc4_shader_state)) ||
            temp_size < exec_size) {
                DRM_ERROR("overflow in exec arguments\n");
+               ret = -EINVAL;
                goto fail;
        }
 
index 881bf489478b01b34e9e4df6013fe608c42215ee..686cdd3c86f2e9178768282a0dd173850e0bf063 100644 (file)
@@ -858,7 +858,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
                }
        }
        plane = &vc4_plane->base;
-       ret = drm_universal_plane_init(dev, plane, 0xff,
+       ret = drm_universal_plane_init(dev, plane, 0,
                                       &vc4_plane_funcs,
                                       formats, num_formats,
                                       type, NULL);
index 08886a3097577242f5c9e025fd6446d81bcc6dec..5cdd003605f57c99faf31832e3f3dd38a75b7402 100644 (file)
@@ -461,7 +461,7 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
                }
 
                ret = vc4_full_res_bounds_check(exec, *obj, surf);
-               if (!ret)
+               if (ret)
                        return ret;
 
                return 0;
index dd21f950e129d4f96d71120d64e3ab499645333d..cde9f37581064828809555bb4808c38f7497d94e 100644 (file)
@@ -331,7 +331,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
        info->fbops = &virtio_gpufb_ops;
        info->pixmap.flags = FB_PIXMAP_SYSTEM;
 
-       info->screen_base = obj->vmap;
+       info->screen_buffer = obj->vmap;
        info->screen_size = obj->gem_base.size;
        drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
        drm_fb_helper_fill_var(info, &vfbdev->helper,
index 723fd763da8e3c49de716207cc68eb46080552c1..7a96798b9c0ac611f588c5b8d80269ed23dc157e 100644 (file)
@@ -481,8 +481,7 @@ static int vmw_fb_kms_framebuffer(struct fb_info *info)
        mode_cmd.height = var->yres;
        mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width;
        mode_cmd.pixel_format =
-               drm_mode_legacy_fb_format(var->bits_per_pixel,
-                       ((var->bits_per_pixel + 7) / 8) * mode_cmd.width);
+               drm_mode_legacy_fb_format(var->bits_per_pixel, depth);
 
        cur_fb = par->set_fb;
        if (cur_fb && cur_fb->width == mode_cmd.width &&
index 4070b7386e9dea9965cc49ab077eb64ffc1f2455..1aeb80e5242461830f1d4075f0fb59bcb6ddc898 100644 (file)
@@ -785,6 +785,11 @@ config HID_SUNPLUS
 config HID_RMI
        tristate "Synaptics RMI4 device support"
        depends on HID
+       select RMI4_CORE
+       select RMI4_F03
+       select RMI4_F11
+       select RMI4_F12
+       select RMI4_F30
        ---help---
        Support for Synaptics RMI4 touchpads.
        Say Y here if you have a Synaptics RMI4 touchpads over i2c-hid or usbhid
index ea36b557d5eea87b27171937fe9bc05f680b15a8..538ff697a4cfe28bb27a8c1c7e87391c7d894415 100644 (file)
@@ -43,7 +43,6 @@
  */
 
 #define DRIVER_DESC "HID core driver"
-#define DRIVER_LICENSE "GPL"
 
 int hid_debug = 0;
 module_param_named(debug, hid_debug, int, 0600);
@@ -724,13 +723,7 @@ static void hid_scan_collection(struct hid_parser *parser, unsigned type)
                hid->group = HID_GROUP_SENSOR_HUB;
 
        if (hid->vendor == USB_VENDOR_ID_MICROSOFT &&
-           (hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3 ||
-            hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2 ||
-            hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP ||
-            hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4 ||
-            hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2 ||
-            hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP ||
-            hid->product == USB_DEVICE_ID_MS_POWER_COVER) &&
+           hid->product == USB_DEVICE_ID_MS_POWER_COVER &&
            hid->group == HID_GROUP_MULTITOUCH)
                hid->group = HID_GROUP_GENERIC;
 
@@ -826,7 +819,8 @@ static int hid_scan_report(struct hid_device *hid)
                hid->group = HID_GROUP_WACOM;
                break;
        case USB_VENDOR_ID_SYNAPTICS:
-               if (hid->group == HID_GROUP_GENERIC)
+               if (hid->group == HID_GROUP_GENERIC ||
+                   hid->group == HID_GROUP_MULTITOUCH_WIN_8)
                        if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
                            && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
                                /*
@@ -1887,6 +1881,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
 #if IS_ENABLED(CONFIG_HID_MAYFLASH)
        { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) },
 #endif
        { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
        { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
@@ -1933,6 +1930,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) },
 #endif
+       { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
@@ -1985,12 +1983,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
@@ -2126,6 +2118,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
        { }
 };
 
@@ -2314,7 +2307,7 @@ __ATTRIBUTE_GROUPS(hid_dev);
 
 static int hid_uevent(struct device *dev, struct kobj_uevent_env *env)
 {
-       struct hid_device *hdev = to_hid_device(dev);   
+       struct hid_device *hdev = to_hid_device(dev);
 
        if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X",
                        hdev->bus, hdev->vendor, hdev->product))
@@ -2867,5 +2860,5 @@ module_exit(hid_exit);
 MODULE_AUTHOR("Andreas Gal");
 MODULE_AUTHOR("Vojtech Pavlik");
 MODULE_AUTHOR("Jiri Kosina");
-MODULE_LICENSE(DRIVER_LICENSE);
+MODULE_LICENSE("GPL");
 
index f31a778b085148fea4a52599c6b89be203889781..b22d0f83f8e38a9ee0d0eb7381e95d6b90442b61 100644 (file)
@@ -168,7 +168,7 @@ struct cp2112_device {
        atomic_t xfer_avail;
        struct gpio_chip gc;
        u8 *in_out_buffer;
-       spinlock_t lock;
+       struct mutex lock;
 
        struct gpio_desc *desc[8];
        bool gpio_poll;
@@ -186,10 +186,9 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
        struct cp2112_device *dev = gpiochip_get_data(chip);
        struct hid_device *hdev = dev->hdev;
        u8 *buf = dev->in_out_buffer;
-       unsigned long flags;
        int ret;
 
-       spin_lock_irqsave(&dev->lock, flags);
+       mutex_lock(&dev->lock);
 
        ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
                                 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
@@ -213,8 +212,8 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
        ret = 0;
 
 exit:
-       spin_unlock_irqrestore(&dev->lock, flags);
-       return ret <= 0 ? ret : -EIO;
+       mutex_unlock(&dev->lock);
+       return ret < 0 ? ret : -EIO;
 }
 
 static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
@@ -222,10 +221,9 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
        struct cp2112_device *dev = gpiochip_get_data(chip);
        struct hid_device *hdev = dev->hdev;
        u8 *buf = dev->in_out_buffer;
-       unsigned long flags;
        int ret;
 
-       spin_lock_irqsave(&dev->lock, flags);
+       mutex_lock(&dev->lock);
 
        buf[0] = CP2112_GPIO_SET;
        buf[1] = value ? 0xff : 0;
@@ -237,7 +235,7 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
        if (ret < 0)
                hid_err(hdev, "error setting GPIO values: %d\n", ret);
 
-       spin_unlock_irqrestore(&dev->lock, flags);
+       mutex_unlock(&dev->lock);
 }
 
 static int cp2112_gpio_get_all(struct gpio_chip *chip)
@@ -245,10 +243,9 @@ static int cp2112_gpio_get_all(struct gpio_chip *chip)
        struct cp2112_device *dev = gpiochip_get_data(chip);
        struct hid_device *hdev = dev->hdev;
        u8 *buf = dev->in_out_buffer;
-       unsigned long flags;
        int ret;
 
-       spin_lock_irqsave(&dev->lock, flags);
+       mutex_lock(&dev->lock);
 
        ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf,
                                 CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT,
@@ -262,7 +259,7 @@ static int cp2112_gpio_get_all(struct gpio_chip *chip)
        ret = buf[1];
 
 exit:
-       spin_unlock_irqrestore(&dev->lock, flags);
+       mutex_unlock(&dev->lock);
 
        return ret;
 }
@@ -284,10 +281,9 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
        struct cp2112_device *dev = gpiochip_get_data(chip);
        struct hid_device *hdev = dev->hdev;
        u8 *buf = dev->in_out_buffer;
-       unsigned long flags;
        int ret;
 
-       spin_lock_irqsave(&dev->lock, flags);
+       mutex_lock(&dev->lock);
 
        ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
                                 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
@@ -308,7 +304,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
                goto fail;
        }
 
-       spin_unlock_irqrestore(&dev->lock, flags);
+       mutex_unlock(&dev->lock);
 
        /*
         * Set gpio value when output direction is already set,
@@ -319,7 +315,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
        return 0;
 
 fail:
-       spin_unlock_irqrestore(&dev->lock, flags);
+       mutex_unlock(&dev->lock);
        return ret < 0 ? ret : -EIO;
 }
 
@@ -1235,7 +1231,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
        if (!dev->in_out_buffer)
                return -ENOMEM;
 
-       spin_lock_init(&dev->lock);
+       mutex_init(&dev->lock);
 
        ret = hid_parse(hdev);
        if (ret) {
index f46f2c5117fae76a1c87105363e5c8db4c8673a3..86c95d30ac801f2895caef97a575955289d352a4 100644 (file)
@@ -76,6 +76,9 @@
 #define USB_VENDOR_ID_ALPS_JP          0x044E
 #define HID_DEVICE_ID_ALPS_U1_DUAL     0x120B
 
+#define USB_VENDOR_ID_AMI              0x046b
+#define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE      0xff10
+
 #define USB_VENDOR_ID_ANTON            0x1130
 #define USB_DEVICE_ID_ANTON_TOUCH_PAD  0x3101
 
 #define USB_DEVICE_ID_DRAGONRISE_WIIU          0x1800
 #define USB_DEVICE_ID_DRAGONRISE_PS3           0x1801
 #define USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR    0x1803
-#define USB_DEVICE_ID_DRAGONRISE_GAMECUBE      0x1843
+#define USB_DEVICE_ID_DRAGONRISE_GAMECUBE1     0x1843
+#define USB_DEVICE_ID_DRAGONRISE_GAMECUBE2     0x1844
 
 #define USB_VENDOR_ID_DWAV             0x0eef
 #define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER   0x0001
 #define USB_DEVICE_ID_LENOVO_CUSBKBD   0x6047
 #define USB_DEVICE_ID_LENOVO_CBTKBD    0x6048
 #define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067
+#define USB_DEVICE_ID_LENOVO_X1_COVER  0x6085
 
 #define USB_VENDOR_ID_LG               0x1fd2
 #define USB_DEVICE_ID_LG_MULTITOUCH    0x0064
+#define USB_DEVICE_ID_LG_MELFAS_MT     0x6007
 
 #define USB_VENDOR_ID_LOGITECH         0x046d
 #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
 #define USB_DEVICE_ID_MS_SURFACE_PRO_2   0x0799
 #define USB_DEVICE_ID_MS_TOUCH_COVER_2   0x07a7
 #define USB_DEVICE_ID_MS_TYPE_COVER_2    0x07a9
-#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3    0x07dc
-#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2  0x07e2
-#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP 0x07dd
-#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4 0x07e4
-#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2 0x07e8
-#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP 0x07e9
 #define USB_DEVICE_ID_MS_POWER_COVER     0x07da
 
 #define USB_VENDOR_ID_MOJO             0x8282
index c5c5fbe9d60577f44085d86a7fb5cf60efb6acd3..52026dc94d5c4b0306ce585be293cbe2cb1910d9 100644 (file)
@@ -872,7 +872,7 @@ static const struct hid_device_id lg_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG),
                .driver_data = LG_NOGET | LG_FF4 },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2),
-               .driver_data = LG_FF2 },
+               .driver_data = LG_NOGET | LG_FF2 },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940),
                .driver_data = LG_FF3 },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR),
index d9090765a6e5ef19be3f9750c5e635400ad21d94..03f10516131df39f1f402adbd6c7e19b1bef0fcd 100644 (file)
@@ -6,12 +6,14 @@
  *
  * Tested with:
  * 0079:1801 "DragonRise Inc. Mayflash PS3 Game Controller Adapter"
+ * 0079:1803 "DragonRise Inc. Mayflash Wireless Sensor DolphinBar"
+ * 0079:1843 "DragonRise Inc. Mayflash GameCube Game Controller Adapter"
+ * 0079:1844 "DragonRise Inc. Mayflash GameCube Game Controller Adapter (v04)"
  *
  * The following adapters probably work too, but need to be tested:
  * 0079:1800 "DragonRise Inc. Mayflash WIIU Game Controller Adapter"
- * 0079:1843 "DragonRise Inc. Mayflash GameCube Game Controller Adapter"
  *
- * Copyright (c) 2016 Marcel Hasler <mahasler@gmail.com>
+ * Copyright (c) 2016-2017 Marcel Hasler <mahasler@gmail.com>
  */
 
 /*
@@ -125,8 +127,8 @@ static int mf_probe(struct hid_device *hid, const struct hid_device_id *id)
 
        dev_dbg(&hid->dev, "Mayflash HID hardware probe...\n");
 
-       /* Split device into four inputs */
-       hid->quirks |= HID_QUIRK_MULTI_INPUT;
+       /* Apply quirks as needed */
+       hid->quirks |= id->driver_data;
 
        error = hid_parse(hid);
        if (error) {
@@ -151,7 +153,14 @@ static int mf_probe(struct hid_device *hid, const struct hid_device_id *id)
 }
 
 static const struct hid_device_id mf_devices[] = {
-       { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3),  },
+       { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3),
+               .driver_data = HID_QUIRK_MULTI_INPUT },
+       { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR),
+               .driver_data = HID_QUIRK_MULTI_INPUT },
+       { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1),
+               .driver_data = HID_QUIRK_MULTI_INPUT },
+       { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2),
+               .driver_data = 0 }, /* No quirk required */
        { }
 };
 MODULE_DEVICE_TABLE(hid, mf_devices);
index 74b7b84a0420b7e40ec0f11868d1d3ee187ebe6a..96e7d3231d2fbacf7f92f11915a9a4ef000a0920 100644 (file)
@@ -274,18 +274,6 @@ static const struct hid_device_id ms_devices[] = {
                .driver_data = MS_NOGET },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
                .driver_data = MS_DUPLICATE_USAGES },
-       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3),
-               .driver_data = MS_HIDINPUT },
-       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2),
-               .driver_data = MS_HIDINPUT },
-       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP),
-               .driver_data = MS_HIDINPUT },
-       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4),
-               .driver_data = MS_HIDINPUT },
-       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2),
-               .driver_data = MS_HIDINPUT },
-       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP),
-               .driver_data = MS_HIDINPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER),
                .driver_data = MS_HIDINPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD),
index 6dca668068440213d837f69c7763bddbe91e561b..692647485a53b3316ec442518b0b84648113dda9 100644 (file)
@@ -68,6 +68,7 @@ MODULE_LICENSE("GPL");
 #define MT_QUIRK_HOVERING              (1 << 11)
 #define MT_QUIRK_CONTACT_CNT_ACCURATE  (1 << 12)
 #define MT_QUIRK_FORCE_GET_FEATURE     (1 << 13)
+#define MT_QUIRK_FIX_CONST_CONTACT_ID  (1 << 14)
 
 #define MT_INPUTMODE_TOUCHSCREEN       0x02
 #define MT_INPUTMODE_TOUCHPAD          0x03
@@ -157,6 +158,7 @@ static void mt_post_parse(struct mt_device *td);
 #define MT_CLS_FLATFROG                                0x0107
 #define MT_CLS_GENERALTOUCH_TWOFINGERS         0x0108
 #define MT_CLS_GENERALTOUCH_PWT_TENFINGERS     0x0109
+#define MT_CLS_LG                              0x010a
 #define MT_CLS_VTL                             0x0110
 
 #define MT_DEFAULT_MAXCONTACT  10
@@ -263,6 +265,12 @@ static struct mt_class mt_classes[] = {
                .sn_move = 2048,
                .maxcontacts = 40,
        },
+       { .name = MT_CLS_LG,
+               .quirks = MT_QUIRK_ALWAYS_VALID |
+                       MT_QUIRK_FIX_CONST_CONTACT_ID |
+                       MT_QUIRK_IGNORE_DUPLICATES |
+                       MT_QUIRK_HOVERING |
+                       MT_QUIRK_CONTACT_CNT_ACCURATE },
        { .name = MT_CLS_VTL,
                .quirks = MT_QUIRK_ALWAYS_VALID |
                        MT_QUIRK_CONTACT_CNT_ACCURATE |
@@ -1078,6 +1086,34 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
        return 0;
 }
 
+static void mt_fix_const_field(struct hid_field *field, unsigned int usage)
+{
+       if (field->usage[0].hid != usage ||
+           !(field->flags & HID_MAIN_ITEM_CONSTANT))
+               return;
+
+       field->flags &= ~HID_MAIN_ITEM_CONSTANT;
+       field->flags |= HID_MAIN_ITEM_VARIABLE;
+}
+
+static void mt_fix_const_fields(struct hid_device *hdev, unsigned int usage)
+{
+       struct hid_report *report;
+       int i;
+
+       list_for_each_entry(report,
+                           &hdev->report_enum[HID_INPUT_REPORT].report_list,
+                           list) {
+
+               if (!report->maxfield)
+                       continue;
+
+               for (i = 0; i < report->maxfield; i++)
+                       if (report->field[i]->maxusage >= 1)
+                               mt_fix_const_field(report->field[i], usage);
+       }
+}
+
 static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
 {
        int ret, i;
@@ -1151,6 +1187,9 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
        if (ret != 0)
                return ret;
 
+       if (mtclass->quirks & MT_QUIRK_FIX_CONST_CONTACT_ID)
+               mt_fix_const_fields(hdev, HID_DG_CONTACTID);
+
        ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
        if (ret)
                return ret;
@@ -1398,6 +1437,11 @@ static const struct hid_device_id mt_devices[] = {
                MT_USB_DEVICE(USB_VENDOR_ID_ILITEK,
                        USB_DEVICE_ID_ILITEK_MULTITOUCH) },
 
+       /* LG Melfas panel */
+       { .driver_data = MT_CLS_LG,
+               HID_USB_DEVICE(USB_VENDOR_ID_LG,
+                       USB_DEVICE_ID_LG_MELFAS_MT) },
+
        /* MosArt panels */
        { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
                MT_USB_DEVICE(USB_VENDOR_ID_ASUS,
index be89bcbf6a71b23f266c8bb0b38b0aa66cca1ae0..5b40c26145993fafa51530dcb573bad0e971fb89 100644 (file)
 #include <linux/hid.h>
 #include <linux/input.h>
 #include <linux/input/mt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/module.h>
 #include <linux/pm.h>
 #include <linux/slab.h>
 #include <linux/wait.h>
 #include <linux/sched.h>
+#include <linux/rmi.h>
 #include "hid-ids.h"
 
 #define RMI_MOUSE_REPORT_ID            0x01 /* Mouse emulation Report */
@@ -33,9 +36,6 @@
 #define RMI_READ_DATA_PENDING          1
 #define RMI_STARTED                    2
 
-#define RMI_SLEEP_NORMAL               0x0
-#define RMI_SLEEP_DEEP_SLEEP           0x1
-
 /* device flags */
 #define RMI_DEVICE                     BIT(0)
 #define RMI_DEVICE_HAS_PHYS_BUTTONS    BIT(1)
@@ -54,25 +54,12 @@ enum rmi_mode_type {
        RMI_MODE_NO_PACKED_ATTN_REPORTS = 2,
 };
 
-struct rmi_function {
-       unsigned page;                  /* page of the function */
-       u16 query_base_addr;            /* base address for queries */
-       u16 command_base_addr;          /* base address for commands */
-       u16 control_base_addr;          /* base address for controls */
-       u16 data_base_addr;             /* base address for datas */
-       unsigned int interrupt_base;    /* cross-function interrupt number
-                                        * (uniq in the device)*/
-       unsigned int interrupt_count;   /* number of interrupts */
-       unsigned int report_size;       /* size of a report */
-       unsigned long irq_mask;         /* mask of the interrupts
-                                        * (to be applied against ATTN IRQ) */
-};
-
 /**
  * struct rmi_data - stores information for hid communication
  *
  * @page_mutex: Locks current page to avoid changing pages in unexpected ways.
  * @page: Keeps track of the current virtual page
+ * @xport: transport device to be registered with the RMI4 core.
  *
  * @wait: Used for waiting for read data
  *
@@ -84,26 +71,18 @@ struct rmi_function {
  *
  * @flags: flags for the current device (started, reading, etc...)
  *
- * @f11: placeholder of internal RMI function F11 description
- * @f30: placeholder of internal RMI function F30 description
- *
- * @max_fingers: maximum finger count reported by the device
- * @max_x: maximum x value reported by the device
- * @max_y: maximum y value reported by the device
- *
- * @gpio_led_count: count of GPIOs + LEDs reported by F30
- * @button_count: actual physical buttons count
- * @button_mask: button mask used to decode GPIO ATTN reports
- * @button_state_mask: pull state of the buttons
- *
- * @input: pointer to the kernel input device
- *
  * @reset_work: worker which will be called in case of a mouse report
  * @hdev: pointer to the struct hid_device
+ *
+ * @device_flags: flags which describe the device
+ *
+ * @domain: the IRQ domain allocated for this RMI4 device
+ * @rmi_irq: the irq that will be used to generate events to rmi-core
  */
 struct rmi_data {
        struct mutex page_mutex;
        int page;
+       struct rmi_transport_dev xport;
 
        wait_queue_head_t wait;
 
@@ -115,34 +94,13 @@ struct rmi_data {
 
        unsigned long flags;
 
-       struct rmi_function f01;
-       struct rmi_function f11;
-       struct rmi_function f30;
-
-       unsigned int max_fingers;
-       unsigned int max_x;
-       unsigned int max_y;
-       unsigned int x_size_mm;
-       unsigned int y_size_mm;
-       bool read_f11_ctrl_regs;
-       u8 f11_ctrl_regs[RMI_F11_CTRL_REG_COUNT];
-
-       unsigned int gpio_led_count;
-       unsigned int button_count;
-       unsigned long button_mask;
-       unsigned long button_state_mask;
-
-       struct input_dev *input;
-
        struct work_struct reset_work;
        struct hid_device *hdev;
 
        unsigned long device_flags;
-       unsigned long firmware_id;
 
-       u8 f01_ctrl0;
-       u8 interrupt_enable_mask;
-       bool restore_interrupt_mask;
+       struct irq_domain *domain;
+       int rmi_irq;
 };
 
 #define RMI_PAGE(addr) (((addr) >> 8) & 0xff)
@@ -220,10 +178,11 @@ static int rmi_write_report(struct hid_device *hdev, u8 *report, int len)
        return ret;
 }
 
-static int rmi_read_block(struct hid_device *hdev, u16 addr, void *buf,
-               const int len)
+static int rmi_hid_read_block(struct rmi_transport_dev *xport, u16 addr,
+               void *buf, size_t len)
 {
-       struct rmi_data *data = hid_get_drvdata(hdev);
+       struct rmi_data *data = container_of(xport, struct rmi_data, xport);
+       struct hid_device *hdev = data->hdev;
        int ret;
        int bytes_read;
        int bytes_needed;
@@ -292,15 +251,11 @@ exit:
        return ret;
 }
 
-static inline int rmi_read(struct hid_device *hdev, u16 addr, void *buf)
-{
-       return rmi_read_block(hdev, addr, buf, 1);
-}
-
-static int rmi_write_block(struct hid_device *hdev, u16 addr, void *buf,
-               const int len)
+static int rmi_hid_write_block(struct rmi_transport_dev *xport, u16 addr,
+               const void *buf, size_t len)
 {
-       struct rmi_data *data = hid_get_drvdata(hdev);
+       struct rmi_data *data = container_of(xport, struct rmi_data, xport);
+       struct hid_device *hdev = data->hdev;
        int ret;
 
        mutex_lock(&data->page_mutex);
@@ -332,62 +287,20 @@ exit:
        return ret;
 }
 
-static inline int rmi_write(struct hid_device *hdev, u16 addr, void *buf)
-{
-       return rmi_write_block(hdev, addr, buf, 1);
-}
-
-static void rmi_f11_process_touch(struct rmi_data *hdata, int slot,
-               u8 finger_state, u8 *touch_data)
-{
-       int x, y, wx, wy;
-       int wide, major, minor;
-       int z;
-
-       input_mt_slot(hdata->input, slot);
-       input_mt_report_slot_state(hdata->input, MT_TOOL_FINGER,
-                       finger_state == 0x01);
-       if (finger_state == 0x01) {
-               x = (touch_data[0] << 4) | (touch_data[2] & 0x0F);
-               y = (touch_data[1] << 4) | (touch_data[2] >> 4);
-               wx = touch_data[3] & 0x0F;
-               wy = touch_data[3] >> 4;
-               wide = (wx > wy);
-               major = max(wx, wy);
-               minor = min(wx, wy);
-               z = touch_data[4];
-
-               /* y is inverted */
-               y = hdata->max_y - y;
-
-               input_event(hdata->input, EV_ABS, ABS_MT_POSITION_X, x);
-               input_event(hdata->input, EV_ABS, ABS_MT_POSITION_Y, y);
-               input_event(hdata->input, EV_ABS, ABS_MT_ORIENTATION, wide);
-               input_event(hdata->input, EV_ABS, ABS_MT_PRESSURE, z);
-               input_event(hdata->input, EV_ABS, ABS_MT_TOUCH_MAJOR, major);
-               input_event(hdata->input, EV_ABS, ABS_MT_TOUCH_MINOR, minor);
-       }
-}
-
 static int rmi_reset_attn_mode(struct hid_device *hdev)
 {
        struct rmi_data *data = hid_get_drvdata(hdev);
+       struct rmi_device *rmi_dev = data->xport.rmi_dev;
        int ret;
 
        ret = rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS);
        if (ret)
                return ret;
 
-       if (data->restore_interrupt_mask) {
-               ret = rmi_write(hdev, data->f01.control_base_addr + 1,
-                               &data->interrupt_enable_mask);
-               if (ret) {
-                       hid_err(hdev, "can not write F01 control register\n");
-                       return ret;
-               }
-       }
+       if (test_bit(RMI_STARTED, &data->flags))
+               ret = rmi_dev->driver->reset_handler(rmi_dev);
 
-       return 0;
+       return ret;
 }
 
 static void rmi_reset_work(struct work_struct *work)
@@ -399,102 +312,22 @@ static void rmi_reset_work(struct work_struct *work)
        rmi_reset_attn_mode(hdata->hdev);
 }
 
-static inline int rmi_schedule_reset(struct hid_device *hdev)
-{
-       struct rmi_data *hdata = hid_get_drvdata(hdev);
-       return schedule_work(&hdata->reset_work);
-}
-
-static int rmi_f11_input_event(struct hid_device *hdev, u8 irq, u8 *data,
-               int size)
-{
-       struct rmi_data *hdata = hid_get_drvdata(hdev);
-       int offset;
-       int i;
-
-       if (!(irq & hdata->f11.irq_mask) || size <= 0)
-               return 0;
-
-       offset = (hdata->max_fingers >> 2) + 1;
-       for (i = 0; i < hdata->max_fingers; i++) {
-               int fs_byte_position = i >> 2;
-               int fs_bit_position = (i & 0x3) << 1;
-               int finger_state = (data[fs_byte_position] >> fs_bit_position) &
-                                       0x03;
-               int position = offset + 5 * i;
-
-               if (position + 5 > size) {
-                       /* partial report, go on with what we received */
-                       printk_once(KERN_WARNING
-                               "%s %s: Detected incomplete finger report. Finger reports may occasionally get dropped on this platform.\n",
-                                dev_driver_string(&hdev->dev),
-                                dev_name(&hdev->dev));
-                       hid_dbg(hdev, "Incomplete finger report\n");
-                       break;
-               }
-
-               rmi_f11_process_touch(hdata, i, finger_state, &data[position]);
-       }
-       input_mt_sync_frame(hdata->input);
-       input_sync(hdata->input);
-       return hdata->f11.report_size;
-}
-
-static int rmi_f30_input_event(struct hid_device *hdev, u8 irq, u8 *data,
-               int size)
+static int rmi_input_event(struct hid_device *hdev, u8 *data, int size)
 {
        struct rmi_data *hdata = hid_get_drvdata(hdev);
-       int i;
-       int button = 0;
-       bool value;
+       struct rmi_device *rmi_dev = hdata->xport.rmi_dev;
+       unsigned long flags;
 
-       if (!(irq & hdata->f30.irq_mask))
+       if (!(test_bit(RMI_STARTED, &hdata->flags)))
                return 0;
 
-       if (size < (int)hdata->f30.report_size) {
-               hid_warn(hdev, "Click Button pressed, but the click data is missing\n");
-               return 0;
-       }
+       local_irq_save(flags);
 
-       for (i = 0; i < hdata->gpio_led_count; i++) {
-               if (test_bit(i, &hdata->button_mask)) {
-                       value = (data[i / 8] >> (i & 0x07)) & BIT(0);
-                       if (test_bit(i, &hdata->button_state_mask))
-                               value = !value;
-                       input_event(hdata->input, EV_KEY, BTN_LEFT + button++,
-                                       value);
-               }
-       }
-       return hdata->f30.report_size;
-}
-
-static int rmi_input_event(struct hid_device *hdev, u8 *data, int size)
-{
-       struct rmi_data *hdata = hid_get_drvdata(hdev);
-       unsigned long irq_mask = 0;
-       unsigned index = 2;
+       rmi_set_attn_data(rmi_dev, data[1], &data[2], size - 2);
 
-       if (!(test_bit(RMI_STARTED, &hdata->flags)))
-               return 0;
+       generic_handle_irq(hdata->rmi_irq);
 
-       irq_mask |= hdata->f11.irq_mask;
-       irq_mask |= hdata->f30.irq_mask;
-
-       if (data[1] & ~irq_mask)
-               hid_dbg(hdev, "unknown intr source:%02lx %s:%d\n",
-                       data[1] & ~irq_mask, __FILE__, __LINE__);
-
-       if (hdata->f11.interrupt_base < hdata->f30.interrupt_base) {
-               index += rmi_f11_input_event(hdev, data[1], &data[index],
-                               size - index);
-               index += rmi_f30_input_event(hdev, data[1], &data[index],
-                               size - index);
-       } else {
-               index += rmi_f30_input_event(hdev, data[1], &data[index],
-                               size - index);
-               index += rmi_f11_input_event(hdev, data[1], &data[index],
-                               size - index);
-       }
+       local_irq_restore(flags);
 
        return 1;
 }
@@ -568,7 +401,7 @@ static int rmi_event(struct hid_device *hdev, struct hid_field *field,
                                return 1;
                }
 
-               rmi_schedule_reset(hdev);
+               schedule_work(&data->reset_work);
                return 1;
        }
 
@@ -576,637 +409,71 @@ static int rmi_event(struct hid_device *hdev, struct hid_field *field,
 }
 
 #ifdef CONFIG_PM
-static int rmi_set_sleep_mode(struct hid_device *hdev, int sleep_mode)
-{
-       struct rmi_data *data = hid_get_drvdata(hdev);
-       int ret;
-       u8 f01_ctrl0;
-
-       f01_ctrl0 = (data->f01_ctrl0 & ~0x3) | sleep_mode;
-
-       ret = rmi_write(hdev, data->f01.control_base_addr,
-                       &f01_ctrl0);
-       if (ret) {
-               hid_err(hdev, "can not write sleep mode\n");
-               return ret;
-       }
-
-       return 0;
-}
-
 static int rmi_suspend(struct hid_device *hdev, pm_message_t message)
 {
        struct rmi_data *data = hid_get_drvdata(hdev);
-       int ret;
-       u8 buf[RMI_F11_CTRL_REG_COUNT];
-
-       if (!(data->device_flags & RMI_DEVICE))
-               return 0;
-
-       ret = rmi_read_block(hdev, data->f11.control_base_addr, buf,
-                               RMI_F11_CTRL_REG_COUNT);
-       if (ret)
-               hid_warn(hdev, "can not read F11 control registers\n");
-       else
-               memcpy(data->f11_ctrl_regs, buf, RMI_F11_CTRL_REG_COUNT);
-
-
-       if (!device_may_wakeup(hdev->dev.parent))
-               return rmi_set_sleep_mode(hdev, RMI_SLEEP_DEEP_SLEEP);
-
-       return 0;
-}
-
-static int rmi_post_reset(struct hid_device *hdev)
-{
-       struct rmi_data *data = hid_get_drvdata(hdev);
+       struct rmi_device *rmi_dev = data->xport.rmi_dev;
        int ret;
 
        if (!(data->device_flags & RMI_DEVICE))
                return 0;
 
-       ret = rmi_reset_attn_mode(hdev);
+       ret = rmi_driver_suspend(rmi_dev, false);
        if (ret) {
-               hid_err(hdev, "can not set rmi mode\n");
+               hid_warn(hdev, "Failed to suspend device: %d\n", ret);
                return ret;
        }
 
-       if (data->read_f11_ctrl_regs) {
-               ret = rmi_write_block(hdev, data->f11.control_base_addr,
-                               data->f11_ctrl_regs, RMI_F11_CTRL_REG_COUNT);
-               if (ret)
-                       hid_warn(hdev,
-                               "can not write F11 control registers after reset\n");
-       }
-
-       if (!device_may_wakeup(hdev->dev.parent)) {
-               ret = rmi_set_sleep_mode(hdev, RMI_SLEEP_NORMAL);
-               if (ret) {
-                       hid_err(hdev, "can not write sleep mode\n");
-                       return ret;
-               }
-       }
-
-       return ret;
+       return 0;
 }
 
 static int rmi_post_resume(struct hid_device *hdev)
 {
        struct rmi_data *data = hid_get_drvdata(hdev);
+       struct rmi_device *rmi_dev = data->xport.rmi_dev;
+       int ret;
 
        if (!(data->device_flags & RMI_DEVICE))
                return 0;
 
-       return rmi_reset_attn_mode(hdev);
-}
-#endif /* CONFIG_PM */
-
-#define RMI4_MAX_PAGE 0xff
-#define RMI4_PAGE_SIZE 0x0100
-
-#define PDT_START_SCAN_LOCATION 0x00e9
-#define PDT_END_SCAN_LOCATION  0x0005
-#define RMI4_END_OF_PDT(id) ((id) == 0x00 || (id) == 0xff)
-
-struct pdt_entry {
-       u8 query_base_addr:8;
-       u8 command_base_addr:8;
-       u8 control_base_addr:8;
-       u8 data_base_addr:8;
-       u8 interrupt_source_count:3;
-       u8 bits3and4:2;
-       u8 function_version:2;
-       u8 bit7:1;
-       u8 function_number:8;
-} __attribute__((__packed__));
-
-static inline unsigned long rmi_gen_mask(unsigned irq_base, unsigned irq_count)
-{
-       return GENMASK(irq_count + irq_base - 1, irq_base);
-}
-
-static void rmi_register_function(struct rmi_data *data,
-       struct pdt_entry *pdt_entry, int page, unsigned interrupt_count)
-{
-       struct rmi_function *f = NULL;
-       u16 page_base = page << 8;
-
-       switch (pdt_entry->function_number) {
-       case 0x01:
-               f = &data->f01;
-               break;
-       case 0x11:
-               f = &data->f11;
-               break;
-       case 0x30:
-               f = &data->f30;
-               break;
-       }
-
-       if (f) {
-               f->page = page;
-               f->query_base_addr = page_base | pdt_entry->query_base_addr;
-               f->command_base_addr = page_base | pdt_entry->command_base_addr;
-               f->control_base_addr = page_base | pdt_entry->control_base_addr;
-               f->data_base_addr = page_base | pdt_entry->data_base_addr;
-               f->interrupt_base = interrupt_count;
-               f->interrupt_count = pdt_entry->interrupt_source_count;
-               f->irq_mask = rmi_gen_mask(f->interrupt_base,
-                                               f->interrupt_count);
-               data->interrupt_enable_mask |= f->irq_mask;
-       }
-}
-
-static int rmi_scan_pdt(struct hid_device *hdev)
-{
-       struct rmi_data *data = hid_get_drvdata(hdev);
-       struct pdt_entry entry;
-       int page;
-       bool page_has_function;
-       int i;
-       int retval;
-       int interrupt = 0;
-       u16 page_start, pdt_start , pdt_end;
-
-       hid_info(hdev, "Scanning PDT...\n");
-
-       for (page = 0; (page <= RMI4_MAX_PAGE); page++) {
-               page_start = RMI4_PAGE_SIZE * page;
-               pdt_start = page_start + PDT_START_SCAN_LOCATION;
-               pdt_end = page_start + PDT_END_SCAN_LOCATION;
-
-               page_has_function = false;
-               for (i = pdt_start; i >= pdt_end; i -= sizeof(entry)) {
-                       retval = rmi_read_block(hdev, i, &entry, sizeof(entry));
-                       if (retval) {
-                               hid_err(hdev,
-                                       "Read of PDT entry at %#06x failed.\n",
-                                       i);
-                               goto error_exit;
-                       }
-
-                       if (RMI4_END_OF_PDT(entry.function_number))
-                               break;
-
-                       page_has_function = true;
-
-                       hid_info(hdev, "Found F%02X on page %#04x\n",
-                                       entry.function_number, page);
-
-                       rmi_register_function(data, &entry, page, interrupt);
-                       interrupt += entry.interrupt_source_count;
-               }
-
-               if (!page_has_function)
-                       break;
-       }
-
-       hid_info(hdev, "%s: Done with PDT scan.\n", __func__);
-       retval = 0;
-
-error_exit:
-       return retval;
-}
-
-#define RMI_DEVICE_F01_BASIC_QUERY_LEN 11
-
-static int rmi_populate_f01(struct hid_device *hdev)
-{
-       struct rmi_data *data = hid_get_drvdata(hdev);
-       u8 basic_queries[RMI_DEVICE_F01_BASIC_QUERY_LEN];
-       u8 info[3];
-       int ret;
-       bool has_query42;
-       bool has_lts;
-       bool has_sensor_id;
-       bool has_ds4_queries = false;
-       bool has_build_id_query = false;
-       bool has_package_id_query = false;
-       u16 query_offset = data->f01.query_base_addr;
-       u16 prod_info_addr;
-       u8 ds4_query_len;
-
-       ret = rmi_read_block(hdev, query_offset, basic_queries,
-                               RMI_DEVICE_F01_BASIC_QUERY_LEN);
-       if (ret) {
-               hid_err(hdev, "Can not read basic queries from Function 0x1.\n");
-               return ret;
-       }
-
-       has_lts = !!(basic_queries[0] & BIT(2));
-       has_sensor_id = !!(basic_queries[1] & BIT(3));
-       has_query42 = !!(basic_queries[1] & BIT(7));
-
-       query_offset += 11;
-       prod_info_addr = query_offset + 6;
-       query_offset += 10;
-
-       if (has_lts)
-               query_offset += 20;
-
-       if (has_sensor_id)
-               query_offset++;
-
-       if (has_query42) {
-               ret = rmi_read(hdev, query_offset, info);
-               if (ret) {
-                       hid_err(hdev, "Can not read query42.\n");
-                       return ret;
-               }
-               has_ds4_queries = !!(info[0] & BIT(0));
-               query_offset++;
-       }
-
-       if (has_ds4_queries) {
-               ret = rmi_read(hdev, query_offset, &ds4_query_len);
-               if (ret) {
-                       hid_err(hdev, "Can not read DS4 Query length.\n");
-                       return ret;
-               }
-               query_offset++;
-
-               if (ds4_query_len > 0) {
-                       ret = rmi_read(hdev, query_offset, info);
-                       if (ret) {
-                               hid_err(hdev, "Can not read DS4 query.\n");
-                               return ret;
-                       }
-
-                       has_package_id_query = !!(info[0] & BIT(0));
-                       has_build_id_query = !!(info[0] & BIT(1));
-               }
-       }
-
-       if (has_package_id_query)
-               prod_info_addr++;
-
-       if (has_build_id_query) {
-               ret = rmi_read_block(hdev, prod_info_addr, info, 3);
-               if (ret) {
-                       hid_err(hdev, "Can not read product info.\n");
-                       return ret;
-               }
-
-               data->firmware_id = info[1] << 8 | info[0];
-               data->firmware_id += info[2] * 65536;
-       }
-
-       ret = rmi_read_block(hdev, data->f01.control_base_addr, info,
-                               2);
-
-       if (ret) {
-               hid_err(hdev, "can not read f01 ctrl registers\n");
-               return ret;
-       }
-
-       data->f01_ctrl0 = info[0];
-
-       if (!info[1]) {
-               /*
-                * Do to a firmware bug in some touchpads the F01 interrupt
-                * enable control register will be cleared on reset.
-                * This will stop the touchpad from reporting data, so
-                * if F01 CTRL1 is 0 then we need to explicitly enable
-                * interrupts for the functions we want data for.
-                */
-               data->restore_interrupt_mask = true;
-
-               ret = rmi_write(hdev, data->f01.control_base_addr + 1,
-                               &data->interrupt_enable_mask);
-               if (ret) {
-                       hid_err(hdev, "can not write to control reg 1: %d.\n",
-                               ret);
-                       return ret;
-               }
-       }
-
-       return 0;
-}
-
-static int rmi_populate_f11(struct hid_device *hdev)
-{
-       struct rmi_data *data = hid_get_drvdata(hdev);
-       u8 buf[20];
-       int ret;
-       bool has_query9;
-       bool has_query10 = false;
-       bool has_query11;
-       bool has_query12;
-       bool has_query27;
-       bool has_query28;
-       bool has_query36 = false;
-       bool has_physical_props;
-       bool has_gestures;
-       bool has_rel;
-       bool has_data40 = false;
-       bool has_dribble = false;
-       bool has_palm_detect = false;
-       unsigned x_size, y_size;
-       u16 query_offset;
-
-       if (!data->f11.query_base_addr) {
-               hid_err(hdev, "No 2D sensor found, giving up.\n");
-               return -ENODEV;
-       }
-
-       /* query 0 contains some useful information */
-       ret = rmi_read(hdev, data->f11.query_base_addr, buf);
-       if (ret) {
-               hid_err(hdev, "can not get query 0: %d.\n", ret);
-               return ret;
-       }
-       has_query9 = !!(buf[0] & BIT(3));
-       has_query11 = !!(buf[0] & BIT(4));
-       has_query12 = !!(buf[0] & BIT(5));
-       has_query27 = !!(buf[0] & BIT(6));
-       has_query28 = !!(buf[0] & BIT(7));
-
-       /* query 1 to get the max number of fingers */
-       ret = rmi_read(hdev, data->f11.query_base_addr + 1, buf);
-       if (ret) {
-               hid_err(hdev, "can not get NumberOfFingers: %d.\n", ret);
-               return ret;
-       }
-       data->max_fingers = (buf[0] & 0x07) + 1;
-       if (data->max_fingers > 5)
-               data->max_fingers = 10;
-
-       data->f11.report_size = data->max_fingers * 5 +
-                               DIV_ROUND_UP(data->max_fingers, 4);
-
-       if (!(buf[0] & BIT(4))) {
-               hid_err(hdev, "No absolute events, giving up.\n");
-               return -ENODEV;
-       }
-
-       has_rel = !!(buf[0] & BIT(3));
-       has_gestures = !!(buf[0] & BIT(5));
-
-       ret = rmi_read(hdev, data->f11.query_base_addr + 5, buf);
-       if (ret) {
-               hid_err(hdev, "can not get absolute data sources: %d.\n", ret);
+       ret = rmi_reset_attn_mode(hdev);
+       if (ret)
                return ret;
-       }
-
-       has_dribble = !!(buf[0] & BIT(4));
-
-       /*
-        * At least 4 queries are guaranteed to be present in F11
-        * +1 for query 5 which is present since absolute events are
-        * reported and +1 for query 12.
-        */
-       query_offset = 6;
-
-       if (has_rel)
-               ++query_offset; /* query 6 is present */
-
-       if (has_gestures) {
-               /* query 8 to find out if query 10 exists */
-               ret = rmi_read(hdev,
-                       data->f11.query_base_addr + query_offset + 1, buf);
-               if (ret) {
-                       hid_err(hdev, "can not read gesture information: %d.\n",
-                               ret);
-                       return ret;
-               }
-               has_palm_detect = !!(buf[0] & BIT(0));
-               has_query10 = !!(buf[0] & BIT(2));
-
-               query_offset += 2; /* query 7 and 8 are present */
-       }
-
-       if (has_query9)
-               ++query_offset;
-
-       if (has_query10)
-               ++query_offset;
-
-       if (has_query11)
-               ++query_offset;
-
-       /* query 12 to know if the physical properties are reported */
-       if (has_query12) {
-               ret = rmi_read(hdev, data->f11.query_base_addr
-                               + query_offset, buf);
-               if (ret) {
-                       hid_err(hdev, "can not get query 12: %d.\n", ret);
-                       return ret;
-               }
-               has_physical_props = !!(buf[0] & BIT(5));
-
-               if (has_physical_props) {
-                       query_offset += 1;
-                       ret = rmi_read_block(hdev,
-                                       data->f11.query_base_addr
-                                               + query_offset, buf, 4);
-                       if (ret) {
-                               hid_err(hdev, "can not read query 15-18: %d.\n",
-                                       ret);
-                               return ret;
-                       }
-
-                       x_size = buf[0] | (buf[1] << 8);
-                       y_size = buf[2] | (buf[3] << 8);
-
-                       data->x_size_mm = DIV_ROUND_CLOSEST(x_size, 10);
-                       data->y_size_mm = DIV_ROUND_CLOSEST(y_size, 10);
-
-                       hid_info(hdev, "%s: size in mm: %d x %d\n",
-                                __func__, data->x_size_mm, data->y_size_mm);
-
-                       /*
-                        * query 15 - 18 contain the size of the sensor
-                        * and query 19 - 26 contain bezel dimensions
-                        */
-                       query_offset += 12;
-               }
-       }
-
-       if (has_query27)
-               ++query_offset;
 
-       if (has_query28) {
-               ret = rmi_read(hdev, data->f11.query_base_addr
-                               + query_offset, buf);
-               if (ret) {
-                       hid_err(hdev, "can not get query 28: %d.\n", ret);
-                       return ret;
-               }
-
-               has_query36 = !!(buf[0] & BIT(6));
-       }
-
-       if (has_query36) {
-               query_offset += 2;
-               ret = rmi_read(hdev, data->f11.query_base_addr
-                               + query_offset, buf);
-               if (ret) {
-                       hid_err(hdev, "can not get query 36: %d.\n", ret);
-                       return ret;
-               }
-
-               has_data40 = !!(buf[0] & BIT(5));
-       }
-
-
-       if (has_data40)
-               data->f11.report_size += data->max_fingers * 2;
-
-       ret = rmi_read_block(hdev, data->f11.control_base_addr,
-                       data->f11_ctrl_regs, RMI_F11_CTRL_REG_COUNT);
+       ret = rmi_driver_resume(rmi_dev, false);
        if (ret) {
-               hid_err(hdev, "can not read ctrl block of size 11: %d.\n", ret);
+               hid_warn(hdev, "Failed to resume device: %d\n", ret);
                return ret;
        }
 
-       /* data->f11_ctrl_regs now contains valid register data */
-       data->read_f11_ctrl_regs = true;
-
-       data->max_x = data->f11_ctrl_regs[6] | (data->f11_ctrl_regs[7] << 8);
-       data->max_y = data->f11_ctrl_regs[8] | (data->f11_ctrl_regs[9] << 8);
-
-       if (has_dribble) {
-               data->f11_ctrl_regs[0] = data->f11_ctrl_regs[0] & ~BIT(6);
-               ret = rmi_write(hdev, data->f11.control_base_addr,
-                               data->f11_ctrl_regs);
-               if (ret) {
-                       hid_err(hdev, "can not write to control reg 0: %d.\n",
-                               ret);
-                       return ret;
-               }
-       }
-
-       if (has_palm_detect) {
-               data->f11_ctrl_regs[11] = data->f11_ctrl_regs[11] & ~BIT(0);
-               ret = rmi_write(hdev, data->f11.control_base_addr + 11,
-                               &data->f11_ctrl_regs[11]);
-               if (ret) {
-                       hid_err(hdev, "can not write to control reg 11: %d.\n",
-                               ret);
-                       return ret;
-               }
-       }
-
-       return 0;
-}
-
-static int rmi_populate_f30(struct hid_device *hdev)
-{
-       struct rmi_data *data = hid_get_drvdata(hdev);
-       u8 buf[20];
-       int ret;
-       bool has_gpio, has_led;
-       unsigned bytes_per_ctrl;
-       u8 ctrl2_addr;
-       int ctrl2_3_length;
-       int i;
-
-       /* function F30 is for physical buttons */
-       if (!data->f30.query_base_addr) {
-               hid_err(hdev, "No GPIO/LEDs found, giving up.\n");
-               return -ENODEV;
-       }
-
-       ret = rmi_read_block(hdev, data->f30.query_base_addr, buf, 2);
-       if (ret) {
-               hid_err(hdev, "can not get F30 query registers: %d.\n", ret);
-               return ret;
-       }
-
-       has_gpio = !!(buf[0] & BIT(3));
-       has_led = !!(buf[0] & BIT(2));
-       data->gpio_led_count = buf[1] & 0x1f;
-
-       /* retrieve ctrl 2 & 3 registers */
-       bytes_per_ctrl = (data->gpio_led_count + 7) / 8;
-       /* Ctrl0 is present only if both has_gpio and has_led are set*/
-       ctrl2_addr = (has_gpio && has_led) ? bytes_per_ctrl : 0;
-       /* Ctrl1 is always be present */
-       ctrl2_addr += bytes_per_ctrl;
-       ctrl2_3_length = 2 * bytes_per_ctrl;
-
-       data->f30.report_size = bytes_per_ctrl;
-
-       ret = rmi_read_block(hdev, data->f30.control_base_addr + ctrl2_addr,
-                               buf, ctrl2_3_length);
-       if (ret) {
-               hid_err(hdev, "can not read ctrl 2&3 block of size %d: %d.\n",
-                       ctrl2_3_length, ret);
-               return ret;
-       }
-
-       for (i = 0; i < data->gpio_led_count; i++) {
-               int byte_position = i >> 3;
-               int bit_position = i & 0x07;
-               u8 dir_byte = buf[byte_position];
-               u8 data_byte = buf[byte_position + bytes_per_ctrl];
-               bool dir = (dir_byte >> bit_position) & BIT(0);
-               bool dat = (data_byte >> bit_position) & BIT(0);
-
-               if (dir == 0) {
-                       /* input mode */
-                       if (dat) {
-                               /* actual buttons have pull up resistor */
-                               data->button_count++;
-                               set_bit(i, &data->button_mask);
-                               set_bit(i, &data->button_state_mask);
-                       }
-               }
-
-       }
-
        return 0;
 }
+#endif /* CONFIG_PM */
 
-static int rmi_populate(struct hid_device *hdev)
+static int rmi_hid_reset(struct rmi_transport_dev *xport, u16 reset_addr)
 {
-       struct rmi_data *data = hid_get_drvdata(hdev);
-       int ret;
-
-       ret = rmi_scan_pdt(hdev);
-       if (ret) {
-               hid_err(hdev, "PDT scan failed with code %d.\n", ret);
-               return ret;
-       }
-
-       ret = rmi_populate_f01(hdev);
-       if (ret) {
-               hid_err(hdev, "Error while initializing F01 (%d).\n", ret);
-               return ret;
-       }
-
-       ret = rmi_populate_f11(hdev);
-       if (ret) {
-               hid_err(hdev, "Error while initializing F11 (%d).\n", ret);
-               return ret;
-       }
-
-       if (!(data->device_flags & RMI_DEVICE_HAS_PHYS_BUTTONS)) {
-               ret = rmi_populate_f30(hdev);
-               if (ret)
-                       hid_warn(hdev, "Error while initializing F30 (%d).\n", ret);
-       }
+       struct rmi_data *data = container_of(xport, struct rmi_data, xport);
+       struct hid_device *hdev = data->hdev;
 
-       return 0;
+       return rmi_reset_attn_mode(hdev);
 }
 
 static int rmi_input_configured(struct hid_device *hdev, struct hid_input *hi)
 {
        struct rmi_data *data = hid_get_drvdata(hdev);
        struct input_dev *input = hi->input;
-       int ret;
-       int res_x, res_y, i;
+       int ret = 0;
+
+       if (!(data->device_flags & RMI_DEVICE))
+               return 0;
 
-       data->input = input;
+       data->xport.input = input;
 
        hid_dbg(hdev, "Opening low level driver\n");
        ret = hid_hw_open(hdev);
        if (ret)
                return ret;
 
-       if (!(data->device_flags & RMI_DEVICE))
-               return 0;
-
        /* Allow incoming hid reports */
        hid_device_io_start(hdev);
 
@@ -1222,40 +489,10 @@ static int rmi_input_configured(struct hid_device *hdev, struct hid_input *hi)
                goto exit;
        }
 
-       ret = rmi_populate(hdev);
-       if (ret)
-               goto exit;
-
-       hid_info(hdev, "firmware id: %ld\n", data->firmware_id);
-
-       __set_bit(EV_ABS, input->evbit);
-       input_set_abs_params(input, ABS_MT_POSITION_X, 1, data->max_x, 0, 0);
-       input_set_abs_params(input, ABS_MT_POSITION_Y, 1, data->max_y, 0, 0);
-
-       if (data->x_size_mm && data->y_size_mm) {
-               res_x = (data->max_x - 1) / data->x_size_mm;
-               res_y = (data->max_y - 1) / data->y_size_mm;
-
-               input_abs_set_res(input, ABS_MT_POSITION_X, res_x);
-               input_abs_set_res(input, ABS_MT_POSITION_Y, res_y);
-       }
-
-       input_set_abs_params(input, ABS_MT_ORIENTATION, 0, 1, 0, 0);
-       input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xff, 0, 0);
-       input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 0x0f, 0, 0);
-       input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 0x0f, 0, 0);
-
-       ret = input_mt_init_slots(input, data->max_fingers, INPUT_MT_POINTER);
-       if (ret < 0)
+       ret = rmi_register_transport_device(&data->xport);
+       if (ret < 0) {
+               dev_err(&hdev->dev, "failed to register transport driver\n");
                goto exit;
-
-       if (data->button_count) {
-               __set_bit(EV_KEY, input->evbit);
-               for (i = 0; i < data->button_count; i++)
-                       __set_bit(BTN_LEFT + i, input->keybit);
-
-               if (data->button_count == 1)
-                       __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
        }
 
        set_bit(RMI_STARTED, &data->flags);
@@ -1304,6 +541,71 @@ static int rmi_check_valid_report_id(struct hid_device *hdev, unsigned type,
        return 0;
 }
 
+static struct rmi_device_platform_data rmi_hid_pdata = {
+       .sensor_pdata = {
+               .sensor_type = rmi_sensor_touchpad,
+               .axis_align.flip_y = true,
+               .dribble = RMI_REG_STATE_ON,
+               .palm_detect = RMI_REG_STATE_OFF,
+       },
+};
+
+static const struct rmi_transport_ops hid_rmi_ops = {
+       .write_block    = rmi_hid_write_block,
+       .read_block     = rmi_hid_read_block,
+       .reset          = rmi_hid_reset,
+};
+
+static void rmi_irq_teardown(void *data)
+{
+       struct rmi_data *hdata = data;
+       struct irq_domain *domain = hdata->domain;
+
+       if (!domain)
+               return;
+
+       irq_dispose_mapping(irq_find_mapping(domain, 0));
+
+       irq_domain_remove(domain);
+       hdata->domain = NULL;
+       hdata->rmi_irq = 0;
+}
+
+static int rmi_irq_map(struct irq_domain *h, unsigned int virq,
+                      irq_hw_number_t hw_irq_num)
+{
+       irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);
+
+       return 0;
+}
+
+static const struct irq_domain_ops rmi_irq_ops = {
+       .map = rmi_irq_map,
+};
+
+static int rmi_setup_irq_domain(struct hid_device *hdev)
+{
+       struct rmi_data *hdata = hid_get_drvdata(hdev);
+       int ret;
+
+       hdata->domain = irq_domain_create_linear(hdev->dev.fwnode, 1,
+                                                &rmi_irq_ops, hdata);
+       if (!hdata->domain)
+               return -ENOMEM;
+
+       ret = devm_add_action_or_reset(&hdev->dev, &rmi_irq_teardown, hdata);
+       if (ret)
+               return ret;
+
+       hdata->rmi_irq = irq_create_mapping(hdata->domain, 0);
+       if (hdata->rmi_irq <= 0) {
+               hid_err(hdev, "Can't allocate an IRQ\n");
+               return hdata->rmi_irq < 0 ? hdata->rmi_irq : -ENXIO;
+       }
+
+       return 0;
+}
+
 static int rmi_probe(struct hid_device *hdev, const struct hid_device_id *id)
 {
        struct rmi_data *data = NULL;
@@ -1365,8 +667,8 @@ static int rmi_probe(struct hid_device *hdev, const struct hid_device_id *id)
 
        data->writeReport = devm_kzalloc(&hdev->dev, alloc_size, GFP_KERNEL);
        if (!data->writeReport) {
-               ret = -ENOMEM;
-               return ret;
+               hid_err(hdev, "failed to allocate buffer for HID reports\n");
+               return -ENOMEM;
        }
 
        data->readReport = data->writeReport + data->output_report_size;
@@ -1375,6 +677,21 @@ static int rmi_probe(struct hid_device *hdev, const struct hid_device_id *id)
 
        mutex_init(&data->page_mutex);
 
+       ret = rmi_setup_irq_domain(hdev);
+       if (ret) {
+               hid_err(hdev, "failed to allocate IRQ domain\n");
+               return ret;
+       }
+
+       if (data->device_flags & RMI_DEVICE_HAS_PHYS_BUTTONS)
+               rmi_hid_pdata.f30_data.disable = true;
+
+       data->xport.dev = hdev->dev.parent;
+       data->xport.pdata = rmi_hid_pdata;
+       data->xport.pdata.irq = data->rmi_irq;
+       data->xport.proto_name = "hid";
+       data->xport.ops = &hid_rmi_ops;
+
 start:
        ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
        if (ret) {
@@ -1382,17 +699,6 @@ start:
                return ret;
        }
 
-       if ((data->device_flags & RMI_DEVICE) &&
-           !test_bit(RMI_STARTED, &data->flags))
-               /*
-                * The device maybe in the bootloader if rmi_input_configured
-                * failed to find F11 in the PDT. Print an error, but don't
-                * return an error from rmi_probe so that hidraw will be
-                * accessible from userspace. That way a userspace tool
-                * can be used to reload working firmware on the touchpad.
-                */
-               hid_err(hdev, "Device failed to be properly configured\n");
-
        return 0;
 }
 
@@ -1401,6 +707,8 @@ static void rmi_remove(struct hid_device *hdev)
        struct rmi_data *hdata = hid_get_drvdata(hdev);
 
        clear_bit(RMI_STARTED, &hdata->flags);
+       cancel_work_sync(&hdata->reset_work);
+       rmi_unregister_transport_device(&hdata->xport);
 
        hid_hw_stop(hdev);
 }
@@ -1408,6 +716,7 @@ static void rmi_remove(struct hid_device *hdev)
 static const struct hid_device_id rmi_id[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14),
                .driver_data = RMI_DEVICE_HAS_PHYS_BUTTONS },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
        { HID_DEVICE(HID_BUS_ANY, HID_GROUP_RMI, HID_ANY_ID, HID_ANY_ID) },
        { }
 };
@@ -1425,7 +734,7 @@ static struct hid_driver rmi_driver = {
 #ifdef CONFIG_PM
        .suspend                = rmi_suspend,
        .resume                 = rmi_post_resume,
-       .reset_resume           = rmi_post_reset,
+       .reset_resume           = rmi_post_resume,
 #endif
 };
 
index ab68afcba2a2613a2a5fe916fd0f727ab96bf16d..a5897b9c0956a1fe0a09e01f6d5aec3563350145 100644 (file)
 #define IPC_ILUP_OFFS                  (0)
 #define IPC_ILUP_BIT                   (1<<IPC_ILUP_OFFS)
 
+/*
+ * ISH FW status bits in ISH FW Status Register
+ */
+#define IPC_ISH_FWSTS_SHIFT            12
+#define IPC_ISH_FWSTS_MASK             GENMASK(15, 12)
+#define IPC_GET_ISH_FWSTS(status)      \
+       (((status) & IPC_ISH_FWSTS_MASK) >> IPC_ISH_FWSTS_SHIFT)
+
 /*
  * FW status bits (relevant)
  */
index 46615a03e78fc72558f136330f52c0a486fdcc91..fd34307a7a705591dc05db23d2fb8e1daaa02be8 100644 (file)
@@ -61,6 +61,18 @@ struct ish_hw {
        void __iomem *mem_addr;
 };
 
+/*
+ * ISH FW status type
+ */
+enum {
+       FWSTS_AFTER_RESET               = 0,
+       FWSTS_WAIT_FOR_HOST             = 4,
+       FWSTS_START_KERNEL_DMA          = 5,
+       FWSTS_FW_IS_RUNNING             = 7,
+       FWSTS_SENSOR_APP_LOADED         = 8,
+       FWSTS_SENSOR_APP_RUNNING        = 15
+};
+
 #define to_ish_hw(dev) (struct ish_hw *)((dev)->hw)
 
 irqreturn_t ish_irq_handler(int irq, void *dev_id);
index 20d647d2dd2cbfa5fb57fbc85de236147cf448ab..8df81dc845295962a2e3b04b16e65ff990162176 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/workqueue.h>
-#include <linux/miscdevice.h>
 #define CREATE_TRACE_POINTS
 #include <trace/events/intel_ish.h>
 #include "ishtp-dev.h"
@@ -47,7 +46,8 @@ MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
  *
  * Callback to direct log messages to Linux trace buffers
  */
-static void ish_event_tracer(struct ishtp_device *dev, char *format, ...)
+static __printf(2, 3)
+void ish_event_tracer(struct ishtp_device *dev, const char *format, ...)
 {
        if (trace_ishtp_dump_enabled()) {
                va_list args;
@@ -205,12 +205,15 @@ static void ish_remove(struct pci_dev *pdev)
 #ifdef CONFIG_PM
 static struct device *ish_resume_device;
 
+/* 50ms to get resume response */
+#define WAIT_FOR_RESUME_ACK_MS         50
+
 /**
  * ish_resume_handler() - Work function to complete resume
  * @work:      work struct
  *
  * The resume work function to complete resume function asynchronously.
- * There are two types of platforms, one where ISH is not powered off,
+ * There are two resume paths, one where ISH is not powered off,
  * in that case a simple resume message is enough, others we need
  * a reset sequence.
  */
@@ -218,20 +221,31 @@ static void ish_resume_handler(struct work_struct *work)
 {
        struct pci_dev *pdev = to_pci_dev(ish_resume_device);
        struct ishtp_device *dev = pci_get_drvdata(pdev);
+       uint32_t fwsts;
        int ret;
 
-       ishtp_send_resume(dev);
+       /* Get ISH FW status */
+       fwsts = IPC_GET_ISH_FWSTS(dev->ops->get_fw_status(dev));
 
-       /* 50 ms to get resume response */
-       if (dev->resume_flag)
-               ret = wait_event_interruptible_timeout(dev->resume_wait,
-                                                      !dev->resume_flag,
-                                                      msecs_to_jiffies(50));
+       /*
+        * If currently, in ISH FW, sensor app is loaded or beyond that,
+        * it means ISH isn't powered off, in this case, send a resume message.
+        */
+       if (fwsts >= FWSTS_SENSOR_APP_LOADED) {
+               ishtp_send_resume(dev);
+
+               /* Waiting to get resume response */
+               if (dev->resume_flag)
+                       ret = wait_event_interruptible_timeout(dev->resume_wait,
+                               !dev->resume_flag,
+                               msecs_to_jiffies(WAIT_FOR_RESUME_ACK_MS));
+       }
 
        /*
-        * If no resume response. This platform  is not S0ix compatible
-        * So on resume full reboot of ISH processor will happen, so
-        * need to go through init sequence again
+        * If in ISH FW, sensor app isn't loaded yet, or no resume response.
+        * That means this platform is not S0ix compatible, or something is
+        * wrong with ISH FW. So on resume, full reboot of ISH processor will
+        * happen, so need to go through init sequence again.
         */
        if (dev->resume_flag)
                ish_init(dev);
index 277983aa1d90a31c607105308c8047a62a747a0e..cd23903ddcf194e581902102ebcd56f70459ac9a 100644 (file)
@@ -208,7 +208,7 @@ int ishtp_hid_probe(unsigned int cur_hid_dev,
        hid->version = le16_to_cpu(ISH_HID_VERSION);
        hid->vendor = le16_to_cpu(ISH_HID_VENDOR);
        hid->product = le16_to_cpu(ISH_HID_PRODUCT);
-       snprintf(hid->name, sizeof(hid->name), "%s %04hX:%04hX", "hid-ishtp",
+       snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X", "hid-ishtp",
                hid->vendor, hid->product);
 
        rv = hid_add_device(hid);
index f4cbc744e6571da94f5d0187ff4cf6e15a2fcd63..5f382fedc2abfaa468595dc5296a650c00525333 100644 (file)
@@ -358,7 +358,7 @@ static void ishtp_cl_dev_release(struct device *dev)
        kfree(to_ishtp_cl_device(dev));
 }
 
-static struct device_type ishtp_cl_device_type = {
+static const struct device_type ishtp_cl_device_type = {
        .release        = ishtp_cl_dev_release,
 };
 
index 59460b66e6890d06313d8098dcac152339f15b0b..b7213608ce43ac111e608d866dc3307a3f059b4f 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/sched.h>
 #include <linux/wait.h>
 #include <linux/spinlock.h>
-#include <linux/miscdevice.h>
 #include "ishtp-dev.h"
 #include "hbm.h"
 #include "client.h"
index ac364418e17cdebb9a7d17f89a15171986bcfad4..d27e03526acd8b57fdb43fcee41420fd3520d552 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
-#include <linux/miscdevice.h>
 #include "ishtp-dev.h"
 #include "hbm.h"
 #include "client.h"
index a94f9a8a96a022f7684d33c79b15115aae91e2b1..6a6d927b78b0e254d5f5248ed11a46852d969680 100644 (file)
@@ -238,7 +238,8 @@ struct ishtp_device {
        uint64_t ishtp_host_dma_rx_buf_phys;
 
        /* Dump to trace buffers if enabled*/
-       void (*print_log)(struct ishtp_device *dev, char *format, ...);
+       __printf(2, 3) void (*print_log)(struct ishtp_device *dev,
+                                        const char *format, ...);
 
        /* Debug stats */
        unsigned int    ipc_rx_cnt;
index 333108ef18cf2f3f94ee3816b2ba6522017295b6..961bc6fdd2d908835fa9a07d169a4746fb44189d 100644 (file)
@@ -43,7 +43,6 @@
  */
 
 #define DRIVER_DESC "USB HID core driver"
-#define DRIVER_LICENSE "GPL"
 
 /*
  * Module parameters.
@@ -1660,4 +1659,4 @@ MODULE_AUTHOR("Andreas Gal");
 MODULE_AUTHOR("Vojtech Pavlik");
 MODULE_AUTHOR("Jiri Kosina");
 MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE(DRIVER_LICENSE);
+MODULE_LICENSE("GPL");
index e9d6cc7cdfc5c8019422d45914dc0363448bcb12..d6847a664446529831395a962aacab7cb49ab8f5 100644 (file)
@@ -57,6 +57,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS },
+       { USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
@@ -84,7 +85,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR, HID_QUIRK_MULTI_INPUT },
-       { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE, HID_QUIRK_MULTI_INPUT },
+       { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
@@ -102,12 +103,6 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
-       { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3, HID_QUIRK_NO_INIT_REPORTS },
-       { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2, HID_QUIRK_NO_INIT_REPORTS },
-       { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP, HID_QUIRK_NO_INIT_REPORTS },
-       { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4, HID_QUIRK_NO_INIT_REPORTS },
-       { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2, HID_QUIRK_NO_INIT_REPORTS },
-       { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
@@ -296,7 +291,7 @@ static void usbhid_remove_all_dquirks(void)
 
 }
 
-/** 
+/**
  * usbhid_quirks_init: apply USB HID quirks specified at module load time
  */
 int usbhid_quirks_init(char **quirks_param)
@@ -360,7 +355,7 @@ static const struct hid_blacklist *usbhid_exists_squirk(const u16 idVendor,
 
        if (bl_entry != NULL)
                dbg_hid("Found squirk 0x%x for USB HID vendor 0x%hx prod 0x%hx\n",
-                               bl_entry->quirks, bl_entry->idVendor, 
+                               bl_entry->quirks, bl_entry->idVendor,
                                bl_entry->idProduct);
        return bl_entry;
 }
index 9a332e683db77d170f2f1b281e813db02896df4a..7fb2d1e4f5ddda9e96c609bf460ddbad65fa596b 100644 (file)
 #define DRIVER_VERSION ""
 #define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@ucw.cz>"
 #define DRIVER_DESC "USB HID Boot Protocol keyboard driver"
-#define DRIVER_LICENSE "GPL"
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE(DRIVER_LICENSE);
+MODULE_LICENSE("GPL");
 
 static const unsigned char usb_kbd_keycode[256] = {
          0,  0,  0,  0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38,
index bf16d72dc3700f0b5feec7d6c60bd6942b4759b6..dd911c5241d822d9ffd277547a96a583e63aa4cf 100644 (file)
 #define DRIVER_VERSION "v1.6"
 #define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@ucw.cz>"
 #define DRIVER_DESC "USB HID Boot Protocol mouse driver"
-#define DRIVER_LICENSE "GPL"
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE(DRIVER_LICENSE);
+MODULE_LICENSE("GPL");
 
 struct usb_mouse {
        char name[128];
index d303e413306df41a1e3f080d5bdd3c00efa75caf..38ee2125412f32c87db8fd5159e443cf0f7bffbf 100644 (file)
 #define DRIVER_VERSION "v2.00"
 #define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@ucw.cz>"
 #define DRIVER_DESC "USB Wacom tablet driver"
-#define DRIVER_LICENSE "GPL"
 
 #define USB_VENDOR_ID_WACOM    0x056a
 #define USB_VENDOR_ID_LENOVO   0x17ef
@@ -166,7 +165,9 @@ struct wacom {
        struct work_struct wireless_work;
        struct work_struct battery_work;
        struct work_struct remote_work;
+       struct delayed_work init_work;
        struct wacom_remote *remote;
+       bool generic_has_leds;
        struct wacom_leds {
                struct wacom_group_leds *groups;
                unsigned int count;
@@ -218,4 +219,6 @@ enum led_brightness wacom_leds_brightness_get(struct wacom_led *led);
 struct wacom_led *wacom_led_find(struct wacom *wacom, unsigned int group,
                                 unsigned int id);
 struct wacom_led *wacom_led_next(struct wacom *wacom, struct wacom_led *cur);
+int wacom_equivalent_usage(int usage);
+int wacom_initialize_leds(struct wacom *wacom);
 #endif
index 8aeca038cc7331244eeeb5dc0468b22ef66e7d44..be8f7e2a026f428f51200e395792dd715a612eeb 100644 (file)
 #include <linux/input/mt.h>
 
 #define WAC_MSG_RETRIES                5
-
-#define WAC_CMD_WL_LED_CONTROL 0x03
-#define WAC_CMD_LED_CONTROL    0x20
-#define WAC_CMD_ICON_START     0x21
-#define WAC_CMD_ICON_XFER      0x23
-#define WAC_CMD_ICON_BT_XFER   0x26
 #define WAC_CMD_RETRIES                10
-#define WAC_CMD_DELETE_PAIRING 0x20
-#define WAC_CMD_UNPAIR_ALL     0xFF
 
 #define DEV_ATTR_RW_PERM (S_IRUGO | S_IWUSR | S_IWGRP)
 #define DEV_ATTR_WO_PERM (S_IWUSR | S_IWGRP)
@@ -120,11 +112,12 @@ static void wacom_feature_mapping(struct hid_device *hdev,
        struct wacom *wacom = hid_get_drvdata(hdev);
        struct wacom_features *features = &wacom->wacom_wac.features;
        struct hid_data *hid_data = &wacom->wacom_wac.hid_data;
+       unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid);
        u8 *data;
        int ret;
        int n;
 
-       switch (usage->hid) {
+       switch (equivalent_usage) {
        case HID_DG_CONTACTMAX:
                /* leave touch_max as is if predefined */
                if (!features->touch_max) {
@@ -333,8 +326,14 @@ static void wacom_post_parse_hid(struct hid_device *hdev,
        if (features->type == HID_GENERIC) {
                /* Any last-minute generic device setup */
                if (features->touch_max > 1) {
-                       input_mt_init_slots(wacom_wac->touch_input, wacom_wac->features.touch_max,
-                                   INPUT_MT_DIRECT);
+                       if (features->device_type & WACOM_DEVICETYPE_DIRECT)
+                               input_mt_init_slots(wacom_wac->touch_input,
+                                                   wacom_wac->features.touch_max,
+                                                   INPUT_MT_DIRECT);
+                       else
+                               input_mt_init_slots(wacom_wac->touch_input,
+                                                   wacom_wac->features.touch_max,
+                                                   INPUT_MT_POINTER);
                }
        }
 }
@@ -497,11 +496,11 @@ static int wacom_bt_query_tablet_data(struct hid_device *hdev, u8 speed,
  * from the tablet, it is necessary to switch the tablet out of this
  * mode and into one which sends the full range of tablet data.
  */
-static int wacom_query_tablet_data(struct hid_device *hdev,
-               struct wacom_features *features)
+static int _wacom_query_tablet_data(struct wacom *wacom)
 {
-       struct wacom *wacom = hid_get_drvdata(hdev);
+       struct hid_device *hdev = wacom->hdev;
        struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+       struct wacom_features *features = &wacom_wac->features;
 
        if (hdev->bus == BUS_BLUETOOTH)
                return wacom_bt_query_tablet_data(hdev, 1, features);
@@ -757,9 +756,6 @@ static int wacom_led_control(struct wacom *wacom)
        unsigned char report_id = WAC_CMD_LED_CONTROL;
        int buf_size = 9;
 
-       if (!hid_get_drvdata(wacom->hdev))
-               return -ENODEV;
-
        if (!wacom->led.groups)
                return -ENOTSUPP;
 
@@ -767,12 +763,21 @@ static int wacom_led_control(struct wacom *wacom)
                report_id = WAC_CMD_WL_LED_CONTROL;
                buf_size = 13;
        }
+       else if (wacom->wacom_wac.features.type == INTUOSP2_BT) {
+               report_id = WAC_CMD_WL_INTUOSP2;
+               buf_size = 51;
+       }
        buf = kzalloc(buf_size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
 
-       if (wacom->wacom_wac.features.type >= INTUOS5S &&
-           wacom->wacom_wac.features.type <= INTUOSPL) {
+       if (wacom->wacom_wac.features.type == HID_GENERIC) {
+               buf[0] = WAC_CMD_LED_CONTROL_GENERIC;
+               buf[1] = wacom->led.llv;
+               buf[2] = wacom->led.groups[0].select & 0x03;
+
+       } else if ((wacom->wacom_wac.features.type >= INTUOS5S &&
+           wacom->wacom_wac.features.type <= INTUOSPL)) {
                /*
                 * Touch Ring and crop mark LED luminance may take on
                 * one of four values:
@@ -792,6 +797,16 @@ static int wacom_led_control(struct wacom *wacom)
                } else
                        buf[1] = led_bits;
        }
+       else if (wacom->wacom_wac.features.type == INTUOSP2_BT) {
+               buf[0] = report_id;
+               buf[4] = 100; // Power Connection LED (ORANGE)
+               buf[5] = 100; // BT Connection LED (BLUE)
+               buf[6] = 100; // Paper Mode (RED?)
+               buf[7] = 100; // Paper Mode (GREEN?)
+               buf[8] = 100; // Paper Mode (BLUE?)
+               buf[9] = wacom->led.llv;
+               buf[10] = wacom->led.groups[0].select & 0x03;
+       }
        else {
                int led = wacom->led.groups[0].select | 0x4;
 
@@ -1032,6 +1047,17 @@ static struct attribute_group intuos5_led_attr_group = {
        .attrs = intuos5_led_attrs,
 };
 
+static struct attribute *generic_led_attrs[] = {
+       &dev_attr_status0_luminance.attr,
+       &dev_attr_status_led0_select.attr,
+       NULL
+};
+
+static struct attribute_group generic_led_attr_group = {
+       .name = "wacom_led",
+       .attrs = generic_led_attrs,
+};
+
 struct wacom_sysfs_group_devres {
        struct attribute_group *group;
        struct kobject *root;
@@ -1353,7 +1379,7 @@ static int wacom_leds_alloc_and_register(struct wacom *wacom, int group_count,
        return 0;
 }
 
-static int wacom_initialize_leds(struct wacom *wacom)
+int wacom_initialize_leds(struct wacom *wacom)
 {
        int error;
 
@@ -1362,6 +1388,23 @@ static int wacom_initialize_leds(struct wacom *wacom)
 
        /* Initialize default values */
        switch (wacom->wacom_wac.features.type) {
+       case HID_GENERIC:
+               if (!wacom->generic_has_leds)
+                       return 0;
+               wacom->led.llv = 100;
+               wacom->led.max_llv = 100;
+
+               error = wacom_leds_alloc_and_register(wacom, 1, 4, false);
+               if (error) {
+                       hid_err(wacom->hdev,
+                               "cannot create leds err: %d\n", error);
+                       return error;
+               }
+
+               error = wacom_devm_sysfs_create_group(wacom,
+                                                     &generic_led_attr_group);
+               break;
+
        case INTUOS4S:
        case INTUOS4:
        case INTUOS4WL:
@@ -1420,6 +1463,17 @@ static int wacom_initialize_leds(struct wacom *wacom)
                                                      &intuos5_led_attr_group);
                break;
 
+       case INTUOSP2_BT:
+               wacom->led.llv = 50;
+               wacom->led.max_llv = 100;
+               error = wacom_leds_alloc_and_register(wacom, 1, 4, false);
+               if (error) {
+                       hid_err(wacom->hdev,
+                               "cannot create leds err: %d\n", error);
+                       return error;
+               }
+               return 0;
+
        case REMOTE:
                wacom->led.llv = 255;
                wacom->led.max_llv = 255;
@@ -1440,11 +1494,23 @@ static int wacom_initialize_leds(struct wacom *wacom)
                        "cannot create sysfs group err: %d\n", error);
                return error;
        }
-       wacom_led_control(wacom);
 
        return 0;
 }
 
+static void wacom_init_work(struct work_struct *work)
+{
+       struct wacom *wacom = container_of(work, struct wacom, init_work.work);
+
+       _wacom_query_tablet_data(wacom);
+       wacom_led_control(wacom);
+}
+
+static void wacom_query_tablet_data(struct wacom *wacom)
+{
+       schedule_delayed_work(&wacom->init_work, msecs_to_jiffies(1000));
+}
+
 static enum power_supply_property wacom_battery_props[] = {
        POWER_SUPPLY_PROP_MODEL_NAME,
        POWER_SUPPLY_PROP_PRESENT,
@@ -2020,6 +2086,24 @@ static void wacom_release_resources(struct wacom *wacom)
        wacom->wacom_wac.pad_input = NULL;
 }
 
+static void wacom_set_shared_values(struct wacom_wac *wacom_wac)
+{
+       if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH) {
+               wacom_wac->shared->type = wacom_wac->features.type;
+               wacom_wac->shared->touch_input = wacom_wac->touch_input;
+       }
+
+       if (wacom_wac->has_mute_touch_switch)
+               wacom_wac->shared->has_mute_touch_switch = true;
+
+       if (wacom_wac->shared->has_mute_touch_switch &&
+           wacom_wac->shared->touch_input) {
+               set_bit(EV_SW, wacom_wac->shared->touch_input->evbit);
+               input_set_capability(wacom_wac->shared->touch_input, EV_SW,
+                                    SW_MUTE_DEVICE);
+       }
+}
+
 static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
 {
        struct wacom_wac *wacom_wac = &wacom->wacom_wac;
@@ -2118,7 +2202,7 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
 
        if (!wireless) {
                /* Note that if query fails it is not a hard failure */
-               wacom_query_tablet_data(hdev, features);
+               wacom_query_tablet_data(wacom);
        }
 
        /* touch only Bamboo doesn't support pen */
@@ -2139,13 +2223,7 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
        if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR)
                error = hid_hw_open(hdev);
 
-       if ((wacom_wac->features.type == INTUOSHT ||
-            wacom_wac->features.type == INTUOSHT2) &&
-           (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)) {
-               wacom_wac->shared->type = wacom_wac->features.type;
-               wacom_wac->shared->touch_input = wacom_wac->touch_input;
-       }
-
+       wacom_set_shared_values(wacom_wac);
        devres_close_group(&hdev->dev, wacom);
 
        return 0;
@@ -2450,6 +2528,7 @@ static int wacom_probe(struct hid_device *hdev,
        wacom->usbdev = dev;
        wacom->intf = intf;
        mutex_init(&wacom->lock);
+       INIT_DELAYED_WORK(&wacom->init_work, wacom_init_work);
        INIT_WORK(&wacom->wireless_work, wacom_wireless_work);
        INIT_WORK(&wacom->battery_work, wacom_battery_work);
        INIT_WORK(&wacom->remote_work, wacom_remote_work);
@@ -2491,12 +2570,17 @@ static void wacom_remove(struct hid_device *hdev)
 
        hid_hw_stop(hdev);
 
+       cancel_delayed_work_sync(&wacom->init_work);
        cancel_work_sync(&wacom->wireless_work);
        cancel_work_sync(&wacom->battery_work);
        cancel_work_sync(&wacom->remote_work);
        if (hdev->bus == BUS_BLUETOOTH)
                device_remove_file(&hdev->dev, &dev_attr_speed);
 
+       /* make sure we don't trigger the LEDs */
+       wacom_led_groups_release(wacom);
+       wacom_release_resources(wacom);
+
        hid_set_drvdata(hdev, NULL);
 }
 
@@ -2504,12 +2588,11 @@ static void wacom_remove(struct hid_device *hdev)
 static int wacom_resume(struct hid_device *hdev)
 {
        struct wacom *wacom = hid_get_drvdata(hdev);
-       struct wacom_features *features = &wacom->wacom_wac.features;
 
        mutex_lock(&wacom->lock);
 
        /* switch to wacom mode first */
-       wacom_query_tablet_data(hdev, features);
+       _wacom_query_tablet_data(wacom);
        wacom_led_control(wacom);
 
        mutex_unlock(&wacom->lock);
@@ -2540,4 +2623,4 @@ module_hid_driver(wacom_driver);
 MODULE_VERSION(DRIVER_VERSION);
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE(DRIVER_LICENSE);
+MODULE_LICENSE("GPL");
index 0884dc9554fdf632e684aa3689292368d5fb7e3b..4aa3de9f1163b30eb64b4304f285a4167aef0cf0 100644 (file)
@@ -43,6 +43,8 @@ static void wacom_report_numbered_buttons(struct input_dev *input_dev,
 
 static int wacom_numbered_button_to_key(int n);
 
+static void wacom_update_led(struct wacom *wacom, int button_count, int mask,
+                            int group);
 /*
  * Percent of battery capacity for Graphire.
  * 8th value means AC online and show 100% capacity.
@@ -166,19 +168,21 @@ static int wacom_pl_irq(struct wacom_wac *wacom)
                wacom->id[0] = STYLUS_DEVICE_ID;
        }
 
-       pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
-       if (features->pressure_max > 255)
-               pressure = (pressure << 1) | ((data[4] >> 6) & 1);
-       pressure += (features->pressure_max + 1) / 2;
+       if (prox) {
+               pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
+               if (features->pressure_max > 255)
+                       pressure = (pressure << 1) | ((data[4] >> 6) & 1);
+               pressure += (features->pressure_max + 1) / 2;
 
-       input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
-       input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
-       input_report_abs(input, ABS_PRESSURE, pressure);
+               input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
+               input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
+               input_report_abs(input, ABS_PRESSURE, pressure);
 
-       input_report_key(input, BTN_TOUCH, data[4] & 0x08);
-       input_report_key(input, BTN_STYLUS, data[4] & 0x10);
-       /* Only allow the stylus2 button to be reported for the pen tool. */
-       input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
+               input_report_key(input, BTN_TOUCH, data[4] & 0x08);
+               input_report_key(input, BTN_STYLUS, data[4] & 0x10);
+               /* Only allow the stylus2 button to be reported for the pen tool. */
+               input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
+       }
 
        if (!prox)
                wacom->id[0] = 0;
@@ -1190,6 +1194,166 @@ static int wacom_wac_finger_count_touches(struct wacom_wac *wacom)
        return count;
 }
 
+static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
+{
+       const int pen_frame_len = 14;
+       const int pen_frames = 7;
+
+       struct input_dev *pen_input = wacom->pen_input;
+       unsigned char *data = wacom->data;
+       int i;
+
+       wacom->serial[0] = get_unaligned_le64(&data[99]);
+       wacom->id[0]     = get_unaligned_le16(&data[107]);
+       if (wacom->serial[0] >> 52 == 1) {
+               /* Add back in missing bits of ID for non-USI pens */
+               wacom->id[0] |= (wacom->serial[0] >> 32) & 0xFFFFF;
+       }
+       wacom->tool[0]   = wacom_intuos_get_tool_type(wacom_intuos_id_mangle(wacom->id[0]));
+
+       for (i = 0; i < pen_frames; i++) {
+               unsigned char *frame = &data[i*pen_frame_len + 1];
+               bool valid = frame[0] & 0x80;
+               bool prox = frame[0] & 0x40;
+               bool range = frame[0] & 0x20;
+
+               if (!valid)
+                       continue;
+
+               if (range) {
+                       input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1]));
+                       input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3]));
+                       input_report_abs(pen_input, ABS_TILT_X, frame[7]);
+                       input_report_abs(pen_input, ABS_TILT_Y, frame[8]);
+                       input_report_abs(pen_input, ABS_Z, get_unaligned_le16(&frame[9]));
+                       input_report_abs(pen_input, ABS_WHEEL, get_unaligned_le16(&frame[11]));
+               }
+               input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
+               input_report_abs(pen_input, ABS_DISTANCE, range ? frame[13] : wacom->features.distance_max);
+
+               input_report_key(pen_input, BTN_TOUCH, frame[0] & 0x01);
+               input_report_key(pen_input, BTN_STYLUS, frame[0] & 0x02);
+               input_report_key(pen_input, BTN_STYLUS2, frame[0] & 0x04);
+
+               input_report_key(pen_input, wacom->tool[0], prox);
+               input_event(pen_input, EV_MSC, MSC_SERIAL, wacom->serial[0]);
+               input_report_abs(pen_input, ABS_MISC,
+                                wacom_intuos_id_mangle(wacom->id[0])); /* report tool id */
+
+               wacom->shared->stylus_in_proximity = prox;
+
+               input_sync(pen_input);
+       }
+}
+
+static void wacom_intuos_pro2_bt_touch(struct wacom_wac *wacom)
+{
+       const int finger_touch_len = 8;
+       const int finger_frames = 4;
+       const int finger_frame_len = 43;
+
+       struct input_dev *touch_input = wacom->touch_input;
+       unsigned char *data = wacom->data;
+       int num_contacts_left = 5;
+       int i, j;
+
+       for (i = 0; i < finger_frames; i++) {
+               unsigned char *frame = &data[i*finger_frame_len + 109];
+               int current_num_contacts = frame[0] & 0x7F;
+               int contacts_to_send;
+
+               if (!(frame[0] & 0x80))
+                       continue;
+
+               /*
+                * First packet resets the counter since only the first
+                * packet in series will have non-zero current_num_contacts.
+                */
+               if (current_num_contacts)
+                       wacom->num_contacts_left = current_num_contacts;
+
+               contacts_to_send = min(num_contacts_left, wacom->num_contacts_left);
+
+               for (j = 0; j < contacts_to_send; j++) {
+                       unsigned char *touch = &frame[j*finger_touch_len + 1];
+                       int slot = input_mt_get_slot_by_key(touch_input, touch[0]);
+                       int x = get_unaligned_le16(&touch[2]);
+                       int y = get_unaligned_le16(&touch[4]);
+                       int w = touch[6] * input_abs_get_res(touch_input, ABS_MT_POSITION_X);
+                       int h = touch[7] * input_abs_get_res(touch_input, ABS_MT_POSITION_Y);
+
+                       if (slot < 0)
+                               continue;
+
+                       input_mt_slot(touch_input, slot);
+                       input_mt_report_slot_state(touch_input, MT_TOOL_FINGER, touch[1] & 0x01);
+                       input_report_abs(touch_input, ABS_MT_POSITION_X, x);
+                       input_report_abs(touch_input, ABS_MT_POSITION_Y, y);
+                       input_report_abs(touch_input, ABS_MT_TOUCH_MAJOR, max(w, h));
+                       input_report_abs(touch_input, ABS_MT_TOUCH_MINOR, min(w, h));
+                       input_report_abs(touch_input, ABS_MT_ORIENTATION, w > h);
+               }
+
+               input_mt_sync_frame(touch_input);
+
+               wacom->num_contacts_left -= contacts_to_send;
+               if (wacom->num_contacts_left <= 0) {
+                       wacom->num_contacts_left = 0;
+                       wacom->shared->touch_down = wacom_wac_finger_count_touches(wacom);
+               }
+       }
+
+       input_report_switch(touch_input, SW_MUTE_DEVICE, !(data[281] >> 7));
+       input_sync(touch_input);
+}
+
+static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
+{
+       struct input_dev *pad_input = wacom->pad_input;
+       unsigned char *data = wacom->data;
+
+       int buttons = (data[282] << 1) | ((data[281] >> 6) & 0x01);
+       int ring = data[285];
+       int prox = buttons | (ring & 0x80);
+
+       wacom_report_numbered_buttons(pad_input, 9, buttons);
+
+       input_report_abs(pad_input, ABS_WHEEL, (ring & 0x80) ? (ring & 0x7f) : 0);
+
+       input_report_key(pad_input, wacom->tool[1], prox ? 1 : 0);
+       input_report_abs(pad_input, ABS_MISC, prox ? PAD_DEVICE_ID : 0);
+       input_event(pad_input, EV_MSC, MSC_SERIAL, 0xffffffff);
+
+       input_sync(pad_input);
+}
+
+static void wacom_intuos_pro2_bt_battery(struct wacom_wac *wacom)
+{
+       unsigned char *data = wacom->data;
+
+       bool chg = data[284] & 0x80;
+       int battery_status = data[284] & 0x7F;
+
+       wacom_notify_battery(wacom, battery_status, chg, 1, chg);
+}
+
+static int wacom_intuos_pro2_bt_irq(struct wacom_wac *wacom, size_t len)
+{
+       unsigned char *data = wacom->data;
+
+       if (data[0] != 0x80) {
+               dev_dbg(wacom->pen_input->dev.parent,
+                       "%s: received unknown report #%d\n", __func__, data[0]);
+               return 0;
+       }
+
+       wacom_intuos_pro2_bt_pen(wacom);
+       wacom_intuos_pro2_bt_touch(wacom);
+       wacom_intuos_pro2_bt_pad(wacom);
+       wacom_intuos_pro2_bt_battery(wacom);
+       return 0;
+}
+
 static int wacom_24hdt_irq(struct wacom_wac *wacom)
 {
        struct input_dev *input = wacom->touch_input;
@@ -1444,7 +1608,7 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
        return 0;
 }
 
-static int wacom_equivalent_usage(int usage)
+int wacom_equivalent_usage(int usage)
 {
        if ((usage & HID_USAGE_PAGE) == WACOM_HID_UP_WACOMDIGITIZER) {
                int subpage = (usage & 0xFF00) << 8;
@@ -1471,6 +1635,16 @@ static int wacom_equivalent_usage(int usage)
                return subpage | subusage;
        }
 
+       if ((usage & HID_USAGE_PAGE) == WACOM_HID_UP_WACOMTOUCH) {
+               int subpage = (usage & 0xFF00) << 8;
+               int subusage = (usage & 0xFF);
+
+               if (subpage == HID_UP_UNDEFINED)
+                       subpage = WACOM_HID_SP_DIGITIZER;
+
+               return subpage | subusage;
+       }
+
        return usage;
 }
 
@@ -1550,12 +1724,14 @@ static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
                wacom_map_usage(input, usage, field, EV_ABS, ABS_Z, 0);
                features->device_type |= WACOM_DEVICETYPE_PAD;
                break;
+       case WACOM_HID_WD_BUTTONCENTER:
+               wacom->generic_has_leds = true;
+               /* fall through */
        case WACOM_HID_WD_BUTTONHOME:
        case WACOM_HID_WD_BUTTONUP:
        case WACOM_HID_WD_BUTTONDOWN:
        case WACOM_HID_WD_BUTTONLEFT:
        case WACOM_HID_WD_BUTTONRIGHT:
-       case WACOM_HID_WD_BUTTONCENTER:
                wacom_map_usage(input, usage, field, EV_KEY,
                                wacom_numbered_button_to_key(features->numbered_buttons),
                                0);
@@ -1563,7 +1739,17 @@ static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
                features->device_type |= WACOM_DEVICETYPE_PAD;
                break;
        case WACOM_HID_WD_TOUCHONOFF:
-               wacom_map_usage(input, usage, field, EV_SW, SW_MUTE_DEVICE, 0);
+               /*
+                * This usage, which is used to mute touch events, comes
+                * from the pad packet, but is reported on the touch
+                * interface. Because the touch interface may not have
+                * been created yet, we cannot call wacom_map_usage(). In
+                * order to process this usage when we receive it, we set
+                * the usage type and code directly.
+                */
+               wacom_wac->has_mute_touch_switch = true;
+               usage->type = EV_SW;
+               usage->code = SW_MUTE_DEVICE;
                features->device_type |= WACOM_DEVICETYPE_PAD;
                break;
        case WACOM_HID_WD_TOUCHSTRIP:
@@ -1578,6 +1764,10 @@ static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
                wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);
                features->device_type |= WACOM_DEVICETYPE_PAD;
                break;
+       case WACOM_HID_WD_TOUCHRINGSTATUS:
+               wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);
+               features->device_type |= WACOM_DEVICETYPE_PAD;
+               break;
        }
 
        switch (equivalent_usage & 0xfffffff0) {
@@ -1620,17 +1810,40 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
        struct input_dev *input = wacom_wac->pad_input;
        struct wacom_features *features = &wacom_wac->features;
        unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
+       int i;
+
+       /*
+        * Avoid reporting this event and setting inrange_state if this usage
+        * hasn't been mapped.
+        */
+       if (!usage->type)
+               return;
 
        if (wacom_equivalent_usage(field->physical) == HID_DG_TABLETFUNCTIONKEY) {
-               wacom_wac->hid_data.inrange_state |= value;
+               if (usage->hid != WACOM_HID_WD_TOUCHRING)
+                       wacom_wac->hid_data.inrange_state |= value;
        }
 
        switch (equivalent_usage) {
        case WACOM_HID_WD_TOUCHRINGSTATUS:
+               if (!value)
+                       input_event(input, usage->type, usage->code, 0);
+               break;
+
+       case WACOM_HID_WD_TOUCHONOFF:
+               if (wacom_wac->shared->touch_input) {
+                       input_report_switch(wacom_wac->shared->touch_input,
+                                           SW_MUTE_DEVICE, !value);
+                       input_sync(wacom_wac->shared->touch_input);
+               }
                break;
 
+       case WACOM_HID_WD_BUTTONCENTER:
+               for (i = 0; i < wacom->led.count; i++)
+                       wacom_update_led(wacom, features->numbered_buttons,
+                                        value, i);
+                /* fall through*/
        default:
-               features->input_event_flag = true;
                input_event(input, usage->type, usage->code, value);
                break;
        }
@@ -1668,20 +1881,15 @@ static void wacom_wac_pad_report(struct hid_device *hdev,
 {
        struct wacom *wacom = hid_get_drvdata(hdev);
        struct wacom_wac *wacom_wac = &wacom->wacom_wac;
-       struct wacom_features *features = &wacom_wac->features;
        struct input_dev *input = wacom_wac->pad_input;
        bool active = wacom_wac->hid_data.inrange_state != 0;
 
        /* report prox for expresskey events */
        if (wacom_equivalent_usage(report->field[0]->physical) == HID_DG_TABLETFUNCTIONKEY) {
-               features->input_event_flag = true;
                input_event(input, EV_ABS, ABS_MISC, active ? PAD_DEVICE_ID : 0);
-       }
-
-       if (features->input_event_flag) {
-               features->input_event_flag = false;
                input_sync(input);
        }
+
 }
 
 static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
@@ -2056,8 +2264,10 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
 
                for (j = 0; j < field->maxusage; j++) {
                        struct hid_usage *usage = &field->usage[j];
+                       unsigned int equivalent_usage =
+                               wacom_equivalent_usage(usage->hid);
 
-                       switch (usage->hid) {
+                       switch (equivalent_usage) {
                        case HID_GD_X:
                        case HID_GD_Y:
                        case HID_DG_WIDTH:
@@ -2066,7 +2276,7 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
                        case HID_DG_INRANGE:
                        case HID_DG_INVERT:
                        case HID_DG_TIPSWITCH:
-                               hid_data->last_slot_field = usage->hid;
+                               hid_data->last_slot_field = equivalent_usage;
                                break;
                        case HID_DG_CONTACTCOUNT:
                                hid_data->cc_report = report->id;
@@ -2121,8 +2331,8 @@ void wacom_wac_usage_mapping(struct hid_device *hdev,
        struct wacom_wac *wacom_wac = &wacom->wacom_wac;
        struct wacom_features *features = &wacom_wac->features;
 
-       /* currently, only direct devices have proper hid report descriptors */
-       features->device_type |= WACOM_DEVICETYPE_DIRECT;
+       if (WACOM_DIRECT_DEVICE(field))
+               features->device_type |= WACOM_DEVICETYPE_DIRECT;
 
        if (WACOM_PAD_FIELD(field))
                wacom_wac_pad_usage_mapping(hdev, field, usage);
@@ -2140,6 +2350,9 @@ void wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
        if (wacom->wacom_wac.features.type != HID_GENERIC)
                return;
 
+       if (value > field->logical_maximum || value < field->logical_minimum)
+               return;
+
        if (WACOM_PAD_FIELD(field)) {
                wacom_wac_pad_battery_event(hdev, field, usage, value);
                if (wacom->wacom_wac.pad_input)
@@ -2667,6 +2880,10 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
                        sync = wacom_intuos_irq(wacom_wac);
                break;
 
+       case INTUOSP2_BT:
+               sync = wacom_intuos_pro2_bt_irq(wacom_wac, len);
+               break;
+
        case TABLETPC:
        case TABLETPCE:
        case TABLETPC2FG:
@@ -2777,8 +2994,6 @@ void wacom_setup_device_quirks(struct wacom *wacom)
        struct wacom_features *features = &wacom->wacom_wac.features;
 
        /* The pen and pad share the same interface on most devices */
-       if (features->numbered_buttons > 0)
-               features->device_type |= WACOM_DEVICETYPE_PAD;
        if (features->type == GRAPHIRE_BT || features->type == WACOM_G4 ||
            features->type == DTUS ||
            (features->type >= INTUOS3S && features->type <= WACOM_MO)) {
@@ -2838,6 +3053,13 @@ void wacom_setup_device_quirks(struct wacom *wacom)
        if (features->type == REMOTE)
                features->device_type = WACOM_DEVICETYPE_PAD;
 
+       if (features->type == INTUOSP2_BT) {
+               features->device_type |= WACOM_DEVICETYPE_PEN |
+                                        WACOM_DEVICETYPE_PAD |
+                                        WACOM_DEVICETYPE_TOUCH;
+               features->quirks |= WACOM_QUIRK_BATTERY;
+       }
+
        switch (features->type) {
        case PL:
        case DTU:
@@ -2984,6 +3206,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
        case INTUOSPL:
        case INTUOS5S:
        case INTUOSPS:
+       case INTUOSP2_BT:
                input_set_abs_params(input_dev, ABS_DISTANCE, 0,
                                      features->distance_max,
                                      features->distance_fuzz, 0);
@@ -3092,6 +3315,27 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
        }
 
        switch (features->type) {
+       case INTUOSP2_BT:
+               input_dev->evbit[0] |= BIT_MASK(EV_SW);
+               __set_bit(SW_MUTE_DEVICE, input_dev->swbit);
+
+               if (wacom_wac->shared->touch->product == 0x361) {
+                       input_set_abs_params(input_dev, ABS_MT_POSITION_X,
+                                            0, 12440, 4, 0);
+                       input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
+                                            0, 8640, 4, 0);
+               }
+               else if (wacom_wac->shared->touch->product == 0x360) {
+                       input_set_abs_params(input_dev, ABS_MT_POSITION_X,
+                                            0, 8960, 4, 0);
+                       input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
+                                            0, 5920, 4, 0);
+               }
+               input_abs_set_res(input_dev, ABS_MT_POSITION_X, 40);
+               input_abs_set_res(input_dev, ABS_MT_POSITION_X, 40);
+
+               /* fall through */
+
        case INTUOS5:
        case INTUOS5L:
        case INTUOSPM:
@@ -3288,6 +3532,9 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
 {
        struct wacom_features *features = &wacom_wac->features;
 
+       if ((features->type == HID_GENERIC) && features->numbered_buttons > 0)
+               features->device_type |= WACOM_DEVICETYPE_PAD;
+
        if (!(features->device_type & WACOM_DEVICETYPE_PAD))
                return -ENODEV;
 
@@ -3389,6 +3636,7 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
        case INTUOSPL:
        case INTUOS5S:
        case INTUOSPS:
+       case INTUOSP2_BT:
                input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
                break;
 
@@ -3947,6 +4195,12 @@ static const struct wacom_features wacom_features_0x343 =
          DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
          WACOM_DTU_OFFSET, WACOM_DTU_OFFSET,
          WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
+static const struct wacom_features wacom_features_0x360 =
+       { "Wacom Intuos Pro M", 44800, 29600, 8191, 63,
+         INTUOSP2_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 9, .touch_max = 10 };
+static const struct wacom_features wacom_features_0x361 =
+       { "Wacom Intuos Pro L", 62200, 43200, 8191, 63,
+         INTUOSP2_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 9, .touch_max = 10 };
 
 static const struct wacom_features wacom_features_HID_ANY_ID =
        { "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };
@@ -4113,6 +4367,8 @@ const struct hid_device_id wacom_ids[] = {
        { USB_DEVICE_WACOM(0x33D) },
        { USB_DEVICE_WACOM(0x33E) },
        { USB_DEVICE_WACOM(0x343) },
+       { BT_DEVICE_WACOM(0x360) },
+       { BT_DEVICE_WACOM(0x361) },
        { USB_DEVICE_WACOM(0x4001) },
        { USB_DEVICE_WACOM(0x4004) },
        { USB_DEVICE_WACOM(0x5000) },
@@ -4121,6 +4377,7 @@ const struct hid_device_id wacom_ids[] = {
 
        { USB_DEVICE_WACOM(HID_ANY_ID) },
        { I2C_DEVICE_WACOM(HID_ANY_ID) },
+       { BT_DEVICE_WACOM(HID_ANY_ID) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, wacom_ids);
index fb0e50acb10daad461f2bbcd16df45d5037b0db2..857ccee16f389f30c1553750f7c59453ceb3d8a6 100644 (file)
@@ -12,8 +12,8 @@
 #include <linux/types.h>
 #include <linux/hid.h>
 
-/* maximum packet length for USB devices */
-#define WACOM_PKGLEN_MAX       192
+/* maximum packet length for USB/BT devices */
+#define WACOM_PKGLEN_MAX       361
 
 #define WACOM_NAME_MAX         64
 #define WACOM_MAX_REMOTES      5
 #define WACOM_REPORT_REMOTE            17
 #define WACOM_REPORT_INTUOSHT2_ID      8
 
+/* wacom command report ids */
+#define WAC_CMD_WL_LED_CONTROL          0x03
+#define WAC_CMD_LED_CONTROL             0x20
+#define WAC_CMD_ICON_START              0x21
+#define WAC_CMD_ICON_XFER               0x23
+#define WAC_CMD_ICON_BT_XFER            0x26
+#define WAC_CMD_DELETE_PAIRING          0x20
+#define WAC_CMD_LED_CONTROL_GENERIC     0x32
+#define WAC_CMD_UNPAIR_ALL              0xFF
+#define WAC_CMD_WL_INTUOSP2             0x82
+
 /* device quirks */
 #define WACOM_QUIRK_BBTOUCH_LOWRES     0x0001
 #define WACOM_QUIRK_SENSE              0x0002
 #define WACOM_HID_SP_DIGITIZER          0x000d0000
 #define WACOM_HID_SP_DIGITIZERINFO      0x00100000
 #define WACOM_HID_WD_DIGITIZER          (WACOM_HID_UP_WACOMDIGITIZER | 0x01)
+#define WACOM_HID_WD_PEN                (WACOM_HID_UP_WACOMDIGITIZER | 0x02)
 #define WACOM_HID_WD_SENSE              (WACOM_HID_UP_WACOMDIGITIZER | 0x36)
 #define WACOM_HID_WD_DIGITIZERFNKEYS    (WACOM_HID_UP_WACOMDIGITIZER | 0x39)
 #define WACOM_HID_WD_SERIALHI           (WACOM_HID_UP_WACOMDIGITIZER | 0x5c)
 #define WACOM_HID_WD_ACCELEROMETER_Y    (WACOM_HID_UP_WACOMDIGITIZER | 0x0402)
 #define WACOM_HID_WD_ACCELEROMETER_Z    (WACOM_HID_UP_WACOMDIGITIZER | 0x0403)
 #define WACOM_HID_WD_BATTERY_CHARGING   (WACOM_HID_UP_WACOMDIGITIZER | 0x0404)
+#define WACOM_HID_WD_TOUCHONOFF         (WACOM_HID_UP_WACOMDIGITIZER | 0x0454)
 #define WACOM_HID_WD_BATTERY_LEVEL      (WACOM_HID_UP_WACOMDIGITIZER | 0x043b)
 #define WACOM_HID_WD_EXPRESSKEY00       (WACOM_HID_UP_WACOMDIGITIZER | 0x0910)
 #define WACOM_HID_WD_EXPRESSKEYCAP00    (WACOM_HID_UP_WACOMDIGITIZER | 0x0950)
 #define WACOM_HID_WD_BUTTONLEFT         (WACOM_HID_UP_WACOMDIGITIZER | 0x0993)
 #define WACOM_HID_WD_BUTTONRIGHT        (WACOM_HID_UP_WACOMDIGITIZER | 0x0994)
 #define WACOM_HID_WD_BUTTONCENTER       (WACOM_HID_UP_WACOMDIGITIZER | 0x0995)
-#define WACOM_HID_WD_TOUCHONOFF         (WACOM_HID_UP_WACOMDIGITIZER | 0x0996)
 #define WACOM_HID_WD_FINGERWHEEL        (WACOM_HID_UP_WACOMDIGITIZER | 0x0d03)
 #define WACOM_HID_WD_OFFSETLEFT         (WACOM_HID_UP_WACOMDIGITIZER | 0x0d30)
 #define WACOM_HID_WD_OFFSETTOP          (WACOM_HID_UP_WACOMDIGITIZER | 0x0d31)
 #define WACOM_HID_UP_G11                0xff110000
 #define WACOM_HID_G11_PEN               (WACOM_HID_UP_G11 | 0x02)
 #define WACOM_HID_G11_TOUCHSCREEN       (WACOM_HID_UP_G11 | 0x11)
+#define WACOM_HID_UP_WACOMTOUCH         0xff000000
+#define WACOM_HID_WT_TOUCHSCREEN        (WACOM_HID_UP_WACOMTOUCH | 0x04)
+#define WACOM_HID_WT_TOUCHPAD           (WACOM_HID_UP_WACOMTOUCH | 0x05)
+#define WACOM_HID_WT_CONTACTMAX         (WACOM_HID_UP_WACOMTOUCH | 0x55)
+#define WACOM_HID_WT_X                  (WACOM_HID_UP_WACOMTOUCH | 0x130)
+#define WACOM_HID_WT_Y                  (WACOM_HID_UP_WACOMTOUCH | 0x131)
 
 #define WACOM_PAD_FIELD(f)     (((f)->physical == HID_DG_TABLETFUNCTIONKEY) || \
                                 ((f)->physical == WACOM_HID_WD_DIGITIZERFNKEYS) || \
                                 ((f)->physical == HID_DG_FINGER) || \
                                 ((f)->application == HID_DG_TOUCHSCREEN) || \
                                 ((f)->application == WACOM_HID_G9_TOUCHSCREEN) || \
-                                ((f)->application == WACOM_HID_G11_TOUCHSCREEN))
+                                ((f)->application == WACOM_HID_G11_TOUCHSCREEN) || \
+                                ((f)->application == WACOM_HID_WT_TOUCHPAD) || \
+                                ((f)->application == HID_DG_TOUCHPAD))
+
+#define WACOM_DIRECT_DEVICE(f) (((f)->application == HID_DG_TOUCHSCREEN) || \
+                                ((f)->application == WACOM_HID_WT_TOUCHSCREEN) || \
+                                ((f)->application == HID_DG_PEN) || \
+                                ((f)->application == WACOM_HID_WD_PEN))
 
 enum {
        PENPARTNER = 0,
@@ -170,6 +195,7 @@ enum {
        INTUOSPS,
        INTUOSPM,
        INTUOSPL,
+       INTUOSP2_BT,
        WACOM_21UX2,
        WACOM_22HD,
        DTK,
@@ -232,7 +258,6 @@ struct wacom_features {
        int pktlen;
        bool check_for_hid_type;
        int hid_type;
-       bool input_event_flag;
 };
 
 struct wacom_shared {
@@ -244,6 +269,7 @@ struct wacom_shared {
        struct input_dev *touch_input;
        struct hid_device *pen;
        struct hid_device *touch;
+       bool has_mute_touch_switch;
 };
 
 struct hid_data {
@@ -300,6 +326,7 @@ struct wacom_wac {
        int mode_report;
        int mode_value;
        struct hid_data hid_data;
+       bool has_mute_touch_switch;
 };
 
 #endif
index cd49cb17eb7fb385ddbf507ef14b2ea2be090159..308dbda700ebdaeb02f222aa46dc7bb79c24c0da 100644 (file)
@@ -383,6 +383,7 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
                return ret;
        }
 
+       init_cached_read_index(channel);
        next_read_location = hv_get_next_read_location(inring_info);
        next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
                                                    sizeof(desc),
index 190d270b20a25e4686ba22ccda4a6cb1c3ab8cd4..0649d53f3d16eeaef7516f830a1a5046a7f3d107 100644 (file)
@@ -1459,6 +1459,16 @@ config SENSORS_SCH5636
          This driver can also be built as a module.  If so, the module
          will be called sch5636.
 
+config SENSORS_STTS751
+       tristate "ST Microelectronics STTS751"
+       depends on I2C
+       help
+         If you say yes here you get support for STTS751
+         temperature sensor chips.
+
+         This driver can also be built as a module.  If so, the module
+         will be called stts751.
+
 config SENSORS_SMM665
        tristate "Summit Microelectronics SMM665"
        depends on I2C
index d2cb7e804a0f4f1555daec6fe47358904c6f767b..5509edf6186acef711284bbde2cbebbc0afdac8f 100644 (file)
@@ -148,6 +148,7 @@ obj-$(CONFIG_SENSORS_SMM665)        += smm665.o
 obj-$(CONFIG_SENSORS_SMSC47B397)+= smsc47b397.o
 obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o
 obj-$(CONFIG_SENSORS_SMSC47M192)+= smsc47m192.o
+obj-$(CONFIG_SENSORS_STTS751)  += stts751.o
 obj-$(CONFIG_SENSORS_AMC6821)  += amc6821.o
 obj-$(CONFIG_SENSORS_TC74)     += tc74.o
 obj-$(CONFIG_SENSORS_THMC50)   += thmc50.o
index ad2b47e403452a230c9f3e57454517a25e63289f..bbe3a5c5b3f51f338661f60f0f6c76390f4489e8 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/mutex.h>
 #include <linux/bitops.h>
+#include <linux/of.h>
 
 /* Addresses to scan
  * The chip also supports addresses 0x35..0x37. Don't scan those addresses
@@ -58,15 +59,22 @@ static const unsigned short normal_i2c[] = {
 #define ADC128_REG_MAN_ID              0x3e
 #define ADC128_REG_DEV_ID              0x3f
 
+/* No. of voltage entries in adc128_attrs */
+#define ADC128_ATTR_NUM_VOLT           (8 * 4)
+
+/* Voltage inputs visible per operation mode */
+static const u8 num_inputs[] = { 7, 8, 4, 6 };
+
 struct adc128_data {
        struct i2c_client *client;
        struct regulator *regulator;
        int vref;               /* Reference voltage in mV */
        struct mutex update_lock;
+       u8 mode;                /* Operation mode */
        bool valid;             /* true if following fields are valid */
        unsigned long last_updated;     /* In jiffies */
 
-       u16 in[3][7];           /* Register value, normalized to 12 bit
+       u16 in[3][8];           /* Register value, normalized to 12 bit
                                 * 0: input voltage
                                 * 1: min limit
                                 * 2: max limit
@@ -87,7 +95,7 @@ static struct adc128_data *adc128_update_device(struct device *dev)
        mutex_lock(&data->update_lock);
 
        if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
-               for (i = 0; i < 7; i++) {
+               for (i = 0; i < num_inputs[data->mode]; i++) {
                        rv = i2c_smbus_read_word_swapped(client,
                                                         ADC128_REG_IN(i));
                        if (rv < 0)
@@ -107,20 +115,25 @@ static struct adc128_data *adc128_update_device(struct device *dev)
                        data->in[2][i] = rv << 4;
                }
 
-               rv = i2c_smbus_read_word_swapped(client, ADC128_REG_TEMP);
-               if (rv < 0)
-                       goto abort;
-               data->temp[0] = rv >> 7;
+               if (data->mode != 1) {
+                       rv = i2c_smbus_read_word_swapped(client,
+                                                        ADC128_REG_TEMP);
+                       if (rv < 0)
+                               goto abort;
+                       data->temp[0] = rv >> 7;
 
-               rv = i2c_smbus_read_byte_data(client, ADC128_REG_TEMP_MAX);
-               if (rv < 0)
-                       goto abort;
-               data->temp[1] = rv << 1;
+                       rv = i2c_smbus_read_byte_data(client,
+                                                     ADC128_REG_TEMP_MAX);
+                       if (rv < 0)
+                               goto abort;
+                       data->temp[1] = rv << 1;
 
-               rv = i2c_smbus_read_byte_data(client, ADC128_REG_TEMP_HYST);
-               if (rv < 0)
-                       goto abort;
-               data->temp[2] = rv << 1;
+                       rv = i2c_smbus_read_byte_data(client,
+                                                     ADC128_REG_TEMP_HYST);
+                       if (rv < 0)
+                               goto abort;
+                       data->temp[2] = rv << 1;
+               }
 
                rv = i2c_smbus_read_byte_data(client, ADC128_REG_ALARM);
                if (rv < 0)
@@ -240,6 +253,25 @@ static ssize_t adc128_show_alarm(struct device *dev,
        return sprintf(buf, "%u\n", !!(alarms & mask));
 }
 
+static umode_t adc128_is_visible(struct kobject *kobj,
+                                struct attribute *attr, int index)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct adc128_data *data = dev_get_drvdata(dev);
+
+       if (index < ADC128_ATTR_NUM_VOLT) {
+               /* Voltage, visible according to num_inputs[] */
+               if (index >= num_inputs[data->mode] * 4)
+                       return 0;
+       } else {
+               /* Temperature, visible if not in mode 1 */
+               if (data->mode == 1)
+                       return 0;
+       }
+
+       return attr->mode;
+}
+
 static SENSOR_DEVICE_ATTR_2(in0_input, S_IRUGO,
                            adc128_show_in, NULL, 0, 0);
 static SENSOR_DEVICE_ATTR_2(in0_min, S_IWUSR | S_IRUGO,
@@ -289,6 +321,13 @@ static SENSOR_DEVICE_ATTR_2(in6_min, S_IWUSR | S_IRUGO,
 static SENSOR_DEVICE_ATTR_2(in6_max, S_IWUSR | S_IRUGO,
                            adc128_show_in, adc128_set_in, 6, 2);
 
+static SENSOR_DEVICE_ATTR_2(in7_input, S_IRUGO,
+                           adc128_show_in, NULL, 7, 0);
+static SENSOR_DEVICE_ATTR_2(in7_min, S_IWUSR | S_IRUGO,
+                           adc128_show_in, adc128_set_in, 7, 1);
+static SENSOR_DEVICE_ATTR_2(in7_max, S_IWUSR | S_IRUGO,
+                           adc128_show_in, adc128_set_in, 7, 2);
+
 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, adc128_show_temp, NULL, 0);
 static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
                          adc128_show_temp, adc128_set_temp, 1);
@@ -302,44 +341,54 @@ static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, adc128_show_alarm, NULL, 3);
 static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, adc128_show_alarm, NULL, 4);
 static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, adc128_show_alarm, NULL, 5);
 static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, adc128_show_alarm, NULL, 6);
+static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, adc128_show_alarm, NULL, 7);
 static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, adc128_show_alarm, NULL, 7);
 
 static struct attribute *adc128_attrs[] = {
-       &sensor_dev_attr_in0_min.dev_attr.attr,
-       &sensor_dev_attr_in1_min.dev_attr.attr,
-       &sensor_dev_attr_in2_min.dev_attr.attr,
-       &sensor_dev_attr_in3_min.dev_attr.attr,
-       &sensor_dev_attr_in4_min.dev_attr.attr,
-       &sensor_dev_attr_in5_min.dev_attr.attr,
-       &sensor_dev_attr_in6_min.dev_attr.attr,
-       &sensor_dev_attr_in0_max.dev_attr.attr,
-       &sensor_dev_attr_in1_max.dev_attr.attr,
-       &sensor_dev_attr_in2_max.dev_attr.attr,
-       &sensor_dev_attr_in3_max.dev_attr.attr,
-       &sensor_dev_attr_in4_max.dev_attr.attr,
-       &sensor_dev_attr_in5_max.dev_attr.attr,
-       &sensor_dev_attr_in6_max.dev_attr.attr,
+       &sensor_dev_attr_in0_alarm.dev_attr.attr,
        &sensor_dev_attr_in0_input.dev_attr.attr,
+       &sensor_dev_attr_in0_max.dev_attr.attr,
+       &sensor_dev_attr_in0_min.dev_attr.attr,
+       &sensor_dev_attr_in1_alarm.dev_attr.attr,
        &sensor_dev_attr_in1_input.dev_attr.attr,
+       &sensor_dev_attr_in1_max.dev_attr.attr,
+       &sensor_dev_attr_in1_min.dev_attr.attr,
+       &sensor_dev_attr_in2_alarm.dev_attr.attr,
        &sensor_dev_attr_in2_input.dev_attr.attr,
+       &sensor_dev_attr_in2_max.dev_attr.attr,
+       &sensor_dev_attr_in2_min.dev_attr.attr,
+       &sensor_dev_attr_in3_alarm.dev_attr.attr,
        &sensor_dev_attr_in3_input.dev_attr.attr,
+       &sensor_dev_attr_in3_max.dev_attr.attr,
+       &sensor_dev_attr_in3_min.dev_attr.attr,
+       &sensor_dev_attr_in4_alarm.dev_attr.attr,
        &sensor_dev_attr_in4_input.dev_attr.attr,
+       &sensor_dev_attr_in4_max.dev_attr.attr,
+       &sensor_dev_attr_in4_min.dev_attr.attr,
+       &sensor_dev_attr_in5_alarm.dev_attr.attr,
        &sensor_dev_attr_in5_input.dev_attr.attr,
+       &sensor_dev_attr_in5_max.dev_attr.attr,
+       &sensor_dev_attr_in5_min.dev_attr.attr,
+       &sensor_dev_attr_in6_alarm.dev_attr.attr,
        &sensor_dev_attr_in6_input.dev_attr.attr,
+       &sensor_dev_attr_in6_max.dev_attr.attr,
+       &sensor_dev_attr_in6_min.dev_attr.attr,
+       &sensor_dev_attr_in7_alarm.dev_attr.attr,
+       &sensor_dev_attr_in7_input.dev_attr.attr,
+       &sensor_dev_attr_in7_max.dev_attr.attr,
+       &sensor_dev_attr_in7_min.dev_attr.attr,
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        &sensor_dev_attr_temp1_max.dev_attr.attr,
-       &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
-       &sensor_dev_attr_in0_alarm.dev_attr.attr,
-       &sensor_dev_attr_in1_alarm.dev_attr.attr,
-       &sensor_dev_attr_in2_alarm.dev_attr.attr,
-       &sensor_dev_attr_in3_alarm.dev_attr.attr,
-       &sensor_dev_attr_in4_alarm.dev_attr.attr,
-       &sensor_dev_attr_in5_alarm.dev_attr.attr,
-       &sensor_dev_attr_in6_alarm.dev_attr.attr,
        &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+       &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
        NULL
 };
-ATTRIBUTE_GROUPS(adc128);
+
+static struct attribute_group adc128_group = {
+       .attrs = adc128_attrs,
+       .is_visible = adc128_is_visible,
+};
+__ATTRIBUTE_GROUPS(adc128);
 
 static int adc128_detect(struct i2c_client *client, struct i2c_board_info *info)
 {
@@ -387,6 +436,15 @@ static int adc128_init_client(struct adc128_data *data)
        if (err)
                return err;
 
+       /* Set operation mode, if non-default */
+       if (data->mode != 0) {
+               err = i2c_smbus_write_byte_data(client,
+                                               ADC128_REG_CONFIG_ADV,
+                                               data->mode << 1);
+               if (err)
+                       return err;
+       }
+
        /* Start monitoring */
        err = i2c_smbus_write_byte_data(client, ADC128_REG_CONFIG, 0x01);
        if (err)
@@ -433,6 +491,21 @@ static int adc128_probe(struct i2c_client *client,
                data->vref = 2560;      /* 2.56V, in mV */
        }
 
+       /* Operation mode is optional. If unspecified, keep current mode */
+       if (of_property_read_u8(dev->of_node, "ti,mode", &data->mode) == 0) {
+               if (data->mode > 3) {
+                       dev_err(dev, "invalid operation mode %d\n",
+                               data->mode);
+                       err = -EINVAL;
+                       goto error;
+               }
+       } else {
+               err = i2c_smbus_read_byte_data(client, ADC128_REG_CONFIG_ADV);
+               if (err < 0)
+                       goto error;
+               data->mode = (err >> 1) & ADC128_REG_MASK;
+       }
+
        data->client = client;
        i2c_set_clientdata(client, data);
        mutex_init(&data->update_lock);
index 1fdcc3e703b9c364a39008e6c5c3a711d2483686..eacf10fadbc61994b2597afcc98dd8a929662889 100644 (file)
@@ -191,7 +191,7 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
        return sprintf(buf, "%u\n", (data->alarms >> index) & 1);
 }
 
-static ssize_t show_alarms(struct device *dev,
+static ssize_t alarms_show(struct device *dev,
                           struct device_attribute *attr,
                           char *buf)
 {
@@ -251,16 +251,16 @@ static ssize_t set_temp_min(struct device *dev,
        return count;
 }
 
-static ssize_t show_low_power(struct device *dev,
+static ssize_t low_power_show(struct device *dev,
                              struct device_attribute *devattr, char *buf)
 {
        struct adm1021_data *data = adm1021_update_device(dev);
        return sprintf(buf, "%d\n", data->low_power);
 }
 
-static ssize_t set_low_power(struct device *dev,
-                            struct device_attribute *devattr,
-                            const char *buf, size_t count)
+static ssize_t low_power_store(struct device *dev,
+                              struct device_attribute *devattr,
+                              const char *buf, size_t count)
 {
        struct adm1021_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
@@ -303,8 +303,8 @@ static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 4);
 static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_alarm, NULL, 3);
 static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2);
 
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
-static DEVICE_ATTR(low_power, S_IWUSR | S_IRUGO, show_low_power, set_low_power);
+static DEVICE_ATTR_RO(alarms);
+static DEVICE_ATTR_RW(low_power);
 
 static struct attribute *adm1021_attributes[] = {
        &sensor_dev_attr_temp1_max.dev_attr.attr,
index 1abb4609b41235bf6f84787eed67a4b706981618..1e4dad36f5efd318c40c0dbda5d1743cdf920d17 100644 (file)
@@ -333,12 +333,12 @@ set_temp(1);
 set_temp(2);
 
 static ssize_t
-show_alarms(struct device *dev, struct device_attribute *attr, char *buf)
+alarms_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct adm1025_data *data = adm1025_update_device(dev);
        return sprintf(buf, "%u\n", data->alarms);
 }
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
 
 static ssize_t
 show_alarm(struct device *dev, struct device_attribute *attr, char *buf)
@@ -358,21 +358,21 @@ static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 4);
 static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_alarm, NULL, 14);
 
 static ssize_t
-show_vid(struct device *dev, struct device_attribute *attr, char *buf)
+cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct adm1025_data *data = adm1025_update_device(dev);
        return sprintf(buf, "%u\n", vid_from_reg(data->vid, data->vrm));
 }
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
 
 static ssize_t
-show_vrm(struct device *dev, struct device_attribute *attr, char *buf)
+vrm_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct adm1025_data *data = dev_get_drvdata(dev);
        return sprintf(buf, "%u\n", data->vrm);
 }
-static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
-                      const char *buf, size_t count)
+static ssize_t vrm_store(struct device *dev, struct device_attribute *attr,
+                        const char *buf, size_t count)
 {
        struct adm1025_data *data = dev_get_drvdata(dev);
        unsigned long val;
@@ -388,7 +388,7 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
        data->vrm = val;
        return count;
 }
-static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm, set_vrm);
+static DEVICE_ATTR_RW(vrm);
 
 /*
  * Real code
index b2a5d9e5c590c724c8ca1957c92314a0d2709157..e43f09a07cd025ad032e88029cf5ec6beb37d55c 100644 (file)
@@ -1034,15 +1034,15 @@ temp_crit_reg(1);
 temp_crit_reg(2);
 temp_crit_reg(3);
 
-static ssize_t show_analog_out_reg(struct device *dev,
-                                  struct device_attribute *attr, char *buf)
+static ssize_t analog_out_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
 {
        struct adm1026_data *data = adm1026_update_device(dev);
        return sprintf(buf, "%d\n", DAC_FROM_REG(data->analog_out));
 }
-static ssize_t set_analog_out_reg(struct device *dev,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t count)
+static ssize_t analog_out_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t count)
 {
        struct adm1026_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
@@ -1060,11 +1060,10 @@ static ssize_t set_analog_out_reg(struct device *dev,
        return count;
 }
 
-static DEVICE_ATTR(analog_out, S_IRUGO | S_IWUSR, show_analog_out_reg,
-       set_analog_out_reg);
+static DEVICE_ATTR_RW(analog_out);
 
-static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr,
-                           char *buf)
+static ssize_t cpu0_vid_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
 {
        struct adm1026_data *data = adm1026_update_device(dev);
        int vid = (data->gpio >> 11) & 0x1f;
@@ -1073,17 +1072,17 @@ static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr,
        return sprintf(buf, "%d\n", vid_from_reg(vid, data->vrm));
 }
 
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid_reg, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
 
-static ssize_t show_vrm_reg(struct device *dev, struct device_attribute *attr,
-                           char *buf)
+static ssize_t vrm_show(struct device *dev, struct device_attribute *attr,
+                       char *buf)
 {
        struct adm1026_data *data = dev_get_drvdata(dev);
        return sprintf(buf, "%d\n", data->vrm);
 }
 
-static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr,
-                            const char *buf, size_t count)
+static ssize_t vrm_store(struct device *dev, struct device_attribute *attr,
+                        const char *buf, size_t count)
 {
        struct adm1026_data *data = dev_get_drvdata(dev);
        unsigned long val;
@@ -1100,16 +1099,16 @@ static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm_reg, store_vrm_reg);
+static DEVICE_ATTR_RW(vrm);
 
-static ssize_t show_alarms_reg(struct device *dev,
-                              struct device_attribute *attr, char *buf)
+static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
+                          char *buf)
 {
        struct adm1026_data *data = adm1026_update_device(dev);
        return sprintf(buf, "%ld\n", data->alarms);
 }
 
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms_reg, NULL);
+static DEVICE_ATTR_RO(alarms);
 
 static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
                          char *buf)
@@ -1148,14 +1147,15 @@ static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 24);
 static SENSOR_DEVICE_ATTR(in10_alarm, S_IRUGO, show_alarm, NULL, 25);
 static SENSOR_DEVICE_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 26);
 
-static ssize_t show_alarm_mask(struct device *dev,
+static ssize_t alarm_mask_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
 {
        struct adm1026_data *data = adm1026_update_device(dev);
        return sprintf(buf, "%ld\n", data->alarm_mask);
 }
-static ssize_t set_alarm_mask(struct device *dev, struct device_attribute *attr,
-                             const char *buf, size_t count)
+static ssize_t alarm_mask_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t count)
 {
        struct adm1026_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
@@ -1186,18 +1186,17 @@ static ssize_t set_alarm_mask(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static DEVICE_ATTR(alarm_mask, S_IRUGO | S_IWUSR, show_alarm_mask,
-       set_alarm_mask);
+static DEVICE_ATTR_RW(alarm_mask);
 
 
-static ssize_t show_gpio(struct device *dev, struct device_attribute *attr,
+static ssize_t gpio_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
 {
        struct adm1026_data *data = adm1026_update_device(dev);
        return sprintf(buf, "%ld\n", data->gpio);
 }
-static ssize_t set_gpio(struct device *dev, struct device_attribute *attr,
-                       const char *buf, size_t count)
+static ssize_t gpio_store(struct device *dev, struct device_attribute *attr,
+                         const char *buf, size_t count)
 {
        struct adm1026_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
@@ -1221,16 +1220,18 @@ static ssize_t set_gpio(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static DEVICE_ATTR(gpio, S_IRUGO | S_IWUSR, show_gpio, set_gpio);
+static DEVICE_ATTR_RW(gpio);
 
-static ssize_t show_gpio_mask(struct device *dev, struct device_attribute *attr,
+static ssize_t gpio_mask_show(struct device *dev,
+                             struct device_attribute *attr,
                              char *buf)
 {
        struct adm1026_data *data = adm1026_update_device(dev);
        return sprintf(buf, "%ld\n", data->gpio_mask);
 }
-static ssize_t set_gpio_mask(struct device *dev, struct device_attribute *attr,
-                            const char *buf, size_t count)
+static ssize_t gpio_mask_store(struct device *dev,
+                              struct device_attribute *attr, const char *buf,
+                              size_t count)
 {
        struct adm1026_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
@@ -1254,17 +1255,17 @@ static ssize_t set_gpio_mask(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static DEVICE_ATTR(gpio_mask, S_IRUGO | S_IWUSR, show_gpio_mask, set_gpio_mask);
+static DEVICE_ATTR_RW(gpio_mask);
 
-static ssize_t show_pwm_reg(struct device *dev, struct device_attribute *attr,
-                           char *buf)
+static ssize_t pwm1_show(struct device *dev, struct device_attribute *attr,
+                        char *buf)
 {
        struct adm1026_data *data = adm1026_update_device(dev);
        return sprintf(buf, "%d\n", PWM_FROM_REG(data->pwm1.pwm));
 }
 
-static ssize_t set_pwm_reg(struct device *dev, struct device_attribute *attr,
-                          const char *buf, size_t count)
+static ssize_t pwm1_store(struct device *dev, struct device_attribute *attr,
+                         const char *buf, size_t count)
 {
        struct adm1026_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
@@ -1285,16 +1286,17 @@ static ssize_t set_pwm_reg(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static ssize_t show_auto_pwm_min(struct device *dev,
-                                struct device_attribute *attr, char *buf)
+static ssize_t temp1_auto_point1_pwm_show(struct device *dev,
+                                         struct device_attribute *attr,
+                                         char *buf)
 {
        struct adm1026_data *data = adm1026_update_device(dev);
        return sprintf(buf, "%d\n", data->pwm1.auto_pwm_min);
 }
 
-static ssize_t set_auto_pwm_min(struct device *dev,
-                               struct device_attribute *attr, const char *buf,
-                               size_t count)
+static ssize_t temp1_auto_point1_pwm_store(struct device *dev,
+                                          struct device_attribute *attr,
+                                          const char *buf, size_t count)
 {
        struct adm1026_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
@@ -1316,21 +1318,23 @@ static ssize_t set_auto_pwm_min(struct device *dev,
        return count;
 }
 
-static ssize_t show_auto_pwm_max(struct device *dev,
-                                struct device_attribute *attr, char *buf)
+static ssize_t temp1_auto_point2_pwm_show(struct device *dev,
+                                         struct device_attribute *attr,
+                                         char *buf)
 {
        return sprintf(buf, "%d\n", ADM1026_PWM_MAX);
 }
 
-static ssize_t show_pwm_enable(struct device *dev,
-                              struct device_attribute *attr, char *buf)
+static ssize_t pwm1_enable_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
 {
        struct adm1026_data *data = adm1026_update_device(dev);
        return sprintf(buf, "%d\n", data->pwm1.enable);
 }
 
-static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *attr,
-                             const char *buf, size_t count)
+static ssize_t pwm1_enable_store(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t count)
 {
        struct adm1026_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
@@ -1366,25 +1370,25 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *attr,
 }
 
 /* enable PWM fan control */
-static DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_pwm_reg, set_pwm_reg);
-static DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, show_pwm_reg, set_pwm_reg);
-static DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR, show_pwm_reg, set_pwm_reg);
-static DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, show_pwm_enable,
-       set_pwm_enable);
-static DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR, show_pwm_enable,
-       set_pwm_enable);
-static DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR, show_pwm_enable,
-       set_pwm_enable);
-static DEVICE_ATTR(temp1_auto_point1_pwm, S_IRUGO | S_IWUSR,
-       show_auto_pwm_min, set_auto_pwm_min);
+static DEVICE_ATTR_RW(pwm1);
+static DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, pwm1_show, pwm1_store);
+static DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR, pwm1_show, pwm1_store);
+static DEVICE_ATTR_RW(pwm1_enable);
+static DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR, pwm1_enable_show,
+                  pwm1_enable_store);
+static DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR, pwm1_enable_show,
+                  pwm1_enable_store);
+static DEVICE_ATTR_RW(temp1_auto_point1_pwm);
 static DEVICE_ATTR(temp2_auto_point1_pwm, S_IRUGO | S_IWUSR,
-       show_auto_pwm_min, set_auto_pwm_min);
+       temp1_auto_point1_pwm_show, temp1_auto_point1_pwm_store);
 static DEVICE_ATTR(temp3_auto_point1_pwm, S_IRUGO | S_IWUSR,
-       show_auto_pwm_min, set_auto_pwm_min);
+       temp1_auto_point1_pwm_show, temp1_auto_point1_pwm_store);
 
-static DEVICE_ATTR(temp1_auto_point2_pwm, S_IRUGO, show_auto_pwm_max, NULL);
-static DEVICE_ATTR(temp2_auto_point2_pwm, S_IRUGO, show_auto_pwm_max, NULL);
-static DEVICE_ATTR(temp3_auto_point2_pwm, S_IRUGO, show_auto_pwm_max, NULL);
+static DEVICE_ATTR_RO(temp1_auto_point2_pwm);
+static DEVICE_ATTR(temp2_auto_point2_pwm, S_IRUGO, temp1_auto_point2_pwm_show,
+                  NULL);
+static DEVICE_ATTR(temp3_auto_point2_pwm, S_IRUGO, temp1_auto_point2_pwm_show,
+                  NULL);
 
 static struct attribute *adm1026_attributes[] = {
        &sensor_dev_attr_in0_input.dev_attr.attr,
index a5818980dad7fcb01b8857c6b24b3154ddaef920..bcf508269fd63bdb11b5fb10360e1b752ebafdd7 100644 (file)
@@ -829,14 +829,14 @@ temp_reg(2);
 temp_reg(3);
 
 /* Alarms */
-static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
+static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
 {
        struct adm1031_data *data = adm1031_update_device(dev);
        return sprintf(buf, "%d\n", data->alarm);
 }
 
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
 
 static ssize_t show_alarm(struct device *dev,
                          struct device_attribute *attr, char *buf)
@@ -867,7 +867,7 @@ static const unsigned int update_intervals[] = {
        16000, 8000, 4000, 2000, 1000, 500, 250, 125,
 };
 
-static ssize_t show_update_interval(struct device *dev,
+static ssize_t update_interval_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
 {
        struct adm1031_data *data = dev_get_drvdata(dev);
@@ -875,9 +875,9 @@ static ssize_t show_update_interval(struct device *dev,
        return sprintf(buf, "%u\n", data->update_interval);
 }
 
-static ssize_t set_update_interval(struct device *dev,
-                                  struct device_attribute *attr,
-                                  const char *buf, size_t count)
+static ssize_t update_interval_store(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf, size_t count)
 {
        struct adm1031_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
@@ -912,8 +912,7 @@ static ssize_t set_update_interval(struct device *dev,
        return count;
 }
 
-static DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR, show_update_interval,
-                  set_update_interval);
+static DEVICE_ATTR_RW(update_interval);
 
 static struct attribute *adm1031_attributes[] = {
        &sensor_dev_attr_fan1_input.dev_attr.attr,
index 72bf2489511e132be2311b7f8a69bfdf36ba0153..255413fdbde9438eccdfdc3a901034182b0b9f4b 100644 (file)
@@ -262,8 +262,8 @@ static struct adm9240_data *adm9240_update_device(struct device *dev)
 /*** sysfs accessors ***/
 
 /* temperature */
-static ssize_t show_temp(struct device *dev, struct device_attribute *dummy,
-               char *buf)
+static ssize_t temp1_input_show(struct device *dev,
+                               struct device_attribute *dummy, char *buf)
 {
        struct adm9240_data *data = adm9240_update_device(dev);
        return sprintf(buf, "%d\n", data->temp / 128 * 500); /* 9-bit value */
@@ -298,7 +298,7 @@ static ssize_t set_max(struct device *dev, struct device_attribute *devattr,
        return count;
 }
 
-static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL);
+static DEVICE_ATTR_RO(temp1_input);
 static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
                show_max, set_max, 0);
 static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO,
@@ -501,13 +501,13 @@ fan(1);
 fan(2);
 
 /* alarms */
-static ssize_t show_alarms(struct device *dev,
+static ssize_t alarms_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
        struct adm9240_data *data = adm9240_update_device(dev);
        return sprintf(buf, "%u\n", data->alarms);
 }
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
 
 static ssize_t show_alarm(struct device *dev,
                struct device_attribute *attr, char *buf)
@@ -527,25 +527,25 @@ static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
 static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
 
 /* vid */
-static ssize_t show_vid(struct device *dev,
-               struct device_attribute *attr, char *buf)
+static ssize_t cpu0_vid_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
 {
        struct adm9240_data *data = adm9240_update_device(dev);
        return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
 }
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
 
 /* analog output */
-static ssize_t show_aout(struct device *dev,
-               struct device_attribute *attr, char *buf)
+static ssize_t aout_output_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
 {
        struct adm9240_data *data = adm9240_update_device(dev);
        return sprintf(buf, "%d\n", AOUT_FROM_REG(data->aout));
 }
 
-static ssize_t set_aout(struct device *dev,
-               struct device_attribute *attr,
-               const char *buf, size_t count)
+static ssize_t aout_output_store(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t count)
 {
        struct adm9240_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
@@ -562,7 +562,7 @@ static ssize_t set_aout(struct device *dev,
        mutex_unlock(&data->update_lock);
        return count;
 }
-static DEVICE_ATTR(aout_output, S_IRUGO | S_IWUSR, show_aout, set_aout);
+static DEVICE_ATTR_RW(aout_output);
 
 static ssize_t chassis_clear(struct device *dev,
                struct device_attribute *attr,
index bdeaece9641d4ace87a2bd5a21917fc35adeb3bd..b939f8a115bab410b2ad666740df629afc6d702e 100644 (file)
 #include <linux/hwmon-sysfs.h>
 #include <linux/slab.h>
 
+#define ADT7411_REG_STAT_1                     0x00
+#define ADT7411_STAT_1_INT_TEMP_HIGH           BIT(0)
+#define ADT7411_STAT_1_INT_TEMP_LOW            BIT(1)
+#define ADT7411_STAT_1_EXT_TEMP_HIGH_AIN1      BIT(2)
+#define ADT7411_STAT_1_EXT_TEMP_LOW            BIT(3)
+#define ADT7411_STAT_1_EXT_TEMP_FAULT          BIT(4)
+#define ADT7411_STAT_1_AIN2                    BIT(5)
+#define ADT7411_STAT_1_AIN3                    BIT(6)
+#define ADT7411_STAT_1_AIN4                    BIT(7)
+#define ADT7411_REG_STAT_2                     0x01
+#define ADT7411_STAT_2_AIN5                    BIT(0)
+#define ADT7411_STAT_2_AIN6                    BIT(1)
+#define ADT7411_STAT_2_AIN7                    BIT(2)
+#define ADT7411_STAT_2_AIN8                    BIT(3)
+#define ADT7411_STAT_2_VDD                     BIT(4)
 #define ADT7411_REG_INT_TEMP_VDD_LSB           0x03
 #define ADT7411_REG_EXT_TEMP_AIN14_LSB         0x04
 #define ADT7411_REG_VDD_MSB                    0x06
 #define ADT7411_REG_EXT_TEMP_AIN1_MSB          0x08
 
 #define ADT7411_REG_CFG1                       0x18
-#define ADT7411_CFG1_START_MONITOR             (1 << 0)
-#define ADT7411_CFG1_RESERVED_BIT1             (1 << 1)
-#define ADT7411_CFG1_EXT_TDM                   (1 << 2)
-#define ADT7411_CFG1_RESERVED_BIT3             (1 << 3)
+#define ADT7411_CFG1_START_MONITOR             BIT(0)
+#define ADT7411_CFG1_RESERVED_BIT1             BIT(1)
+#define ADT7411_CFG1_EXT_TDM                   BIT(2)
+#define ADT7411_CFG1_RESERVED_BIT3             BIT(3)
 
 #define ADT7411_REG_CFG2                       0x19
-#define ADT7411_CFG2_DISABLE_AVG               (1 << 5)
+#define ADT7411_CFG2_DISABLE_AVG               BIT(5)
 
 #define ADT7411_REG_CFG3                       0x1a
-#define ADT7411_CFG3_ADC_CLK_225               (1 << 0)
-#define ADT7411_CFG3_RESERVED_BIT1             (1 << 1)
-#define ADT7411_CFG3_RESERVED_BIT2             (1 << 2)
-#define ADT7411_CFG3_RESERVED_BIT3             (1 << 3)
-#define ADT7411_CFG3_REF_VDD                   (1 << 4)
+#define ADT7411_CFG3_ADC_CLK_225               BIT(0)
+#define ADT7411_CFG3_RESERVED_BIT1             BIT(1)
+#define ADT7411_CFG3_RESERVED_BIT2             BIT(2)
+#define ADT7411_CFG3_RESERVED_BIT3             BIT(3)
+#define ADT7411_CFG3_REF_VDD                   BIT(4)
+
+#define ADT7411_REG_VDD_HIGH                   0x23
+#define ADT7411_REG_VDD_LOW                    0x24
+#define ADT7411_REG_TEMP_HIGH(nr)              (0x25 + 2 * (nr))
+#define ADT7411_REG_TEMP_LOW(nr)               (0x26 + 2 * (nr))
+#define ADT7411_REG_IN_HIGH(nr)                ((nr) > 1 \
+                                                 ? 0x2b + 2 * ((nr)-2) \
+                                                 : 0x27)
+#define ADT7411_REG_IN_LOW(nr)                 ((nr) > 1 \
+                                                 ? 0x2c + 2 * ((nr)-2) \
+                                                 : 0x28)
 
 #define ADT7411_REG_DEVICE_ID                  0x4d
 #define ADT7411_REG_MANUFACTURER_ID            0x4e
 
 static const unsigned short normal_i2c[] = { 0x48, 0x4a, 0x4b, I2C_CLIENT_END };
 
+static const u8 adt7411_in_alarm_reg[] = {
+       ADT7411_REG_STAT_2,
+       ADT7411_REG_STAT_1,
+       ADT7411_REG_STAT_1,
+       ADT7411_REG_STAT_1,
+       ADT7411_REG_STAT_1,
+       ADT7411_REG_STAT_2,
+       ADT7411_REG_STAT_2,
+       ADT7411_REG_STAT_2,
+       ADT7411_REG_STAT_2,
+};
+
+static const u8 adt7411_in_alarm_bits[] = {
+       ADT7411_STAT_2_VDD,
+       ADT7411_STAT_1_EXT_TEMP_HIGH_AIN1,
+       ADT7411_STAT_1_AIN2,
+       ADT7411_STAT_1_AIN3,
+       ADT7411_STAT_1_AIN4,
+       ADT7411_STAT_2_AIN5,
+       ADT7411_STAT_2_AIN6,
+       ADT7411_STAT_2_AIN7,
+       ADT7411_STAT_2_AIN8,
+};
+
 struct adt7411_data {
        struct mutex device_lock;       /* for "atomic" device accesses */
        struct mutex update_lock;
@@ -165,6 +215,19 @@ static struct attribute *adt7411_attrs[] = {
 };
 ATTRIBUTE_GROUPS(adt7411);
 
+static int adt7411_read_in_alarm(struct device *dev, int channel, long *val)
+{
+       struct adt7411_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
+       int ret;
+
+       ret = i2c_smbus_read_byte_data(client, adt7411_in_alarm_reg[channel]);
+       if (ret < 0)
+               return ret;
+       *val = !!(ret & adt7411_in_alarm_bits[channel]);
+       return 0;
+}
+
 static int adt7411_read_in_vdd(struct device *dev, u32 attr, long *val)
 {
        struct adt7411_data *data = dev_get_drvdata(dev);
@@ -179,32 +242,41 @@ static int adt7411_read_in_vdd(struct device *dev, u32 attr, long *val)
                        return ret;
                *val = ret * 7000 / 1024;
                return 0;
+       case hwmon_in_min:
+               ret = i2c_smbus_read_byte_data(client, ADT7411_REG_VDD_LOW);
+               if (ret < 0)
+                       return ret;
+               *val = ret * 7000 / 256;
+               return 0;
+       case hwmon_in_max:
+               ret = i2c_smbus_read_byte_data(client, ADT7411_REG_VDD_HIGH);
+               if (ret < 0)
+                       return ret;
+               *val = ret * 7000 / 256;
+               return 0;
+       case hwmon_in_alarm:
+               return adt7411_read_in_alarm(dev, 0, val);
        default:
                return -EOPNOTSUPP;
        }
 }
 
-static int adt7411_read_in_chan(struct device *dev, u32 attr, int channel,
-                               long *val)
+static int adt7411_update_vref(struct device *dev)
 {
        struct adt7411_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
+       int val;
 
-       int ret;
-       int lsb_reg, lsb_shift;
-       int nr = channel - 1;
-
-       mutex_lock(&data->update_lock);
        if (time_after_eq(jiffies, data->next_update)) {
-               ret = i2c_smbus_read_byte_data(client, ADT7411_REG_CFG3);
-               if (ret < 0)
-                       goto exit_unlock;
+               val = i2c_smbus_read_byte_data(client, ADT7411_REG_CFG3);
+               if (val < 0)
+                       return val;
 
-               if (ret & ADT7411_CFG3_REF_VDD) {
-                       ret = adt7411_read_in_vdd(dev, hwmon_in_input,
+               if (val & ADT7411_CFG3_REF_VDD) {
+                       val = adt7411_read_in_vdd(dev, hwmon_in_input,
                                                  &data->vref_cached);
-                       if (ret < 0)
-                               goto exit_unlock;
+                       if (val < 0)
+                               return val;
                } else {
                        data->vref_cached = 2250;
                }
@@ -212,6 +284,24 @@ static int adt7411_read_in_chan(struct device *dev, u32 attr, int channel,
                data->next_update = jiffies + HZ;
        }
 
+       return 0;
+}
+
+static int adt7411_read_in_chan(struct device *dev, u32 attr, int channel,
+                               long *val)
+{
+       struct adt7411_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
+
+       int ret;
+       int reg, lsb_reg, lsb_shift;
+       int nr = channel - 1;
+
+       mutex_lock(&data->update_lock);
+       ret = adt7411_update_vref(dev);
+       if (ret < 0)
+               goto exit_unlock;
+
        switch (attr) {
        case hwmon_in_input:
                lsb_reg = ADT7411_REG_EXT_TEMP_AIN14_LSB + (nr >> 2);
@@ -224,6 +314,20 @@ static int adt7411_read_in_chan(struct device *dev, u32 attr, int channel,
                *val = ret * data->vref_cached / 1024;
                ret = 0;
                break;
+       case hwmon_in_min:
+       case hwmon_in_max:
+               reg = (attr == hwmon_in_min)
+                       ? ADT7411_REG_IN_LOW(channel)
+                       : ADT7411_REG_IN_HIGH(channel);
+               ret = i2c_smbus_read_byte_data(client, reg);
+               if (ret < 0)
+                       goto exit_unlock;
+               *val = ret * data->vref_cached / 256;
+               ret = 0;
+               break;
+       case hwmon_in_alarm:
+               ret = adt7411_read_in_alarm(dev, channel, val);
+               break;
        default:
                ret = -EOPNOTSUPP;
                break;
@@ -242,12 +346,44 @@ static int adt7411_read_in(struct device *dev, u32 attr, int channel,
                return adt7411_read_in_chan(dev, attr, channel, val);
 }
 
+
+static int adt7411_read_temp_alarm(struct device *dev, u32 attr, int channel,
+                                  long *val)
+{
+       struct adt7411_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
+       int ret, bit;
+
+       ret = i2c_smbus_read_byte_data(client, ADT7411_REG_STAT_1);
+       if (ret < 0)
+               return ret;
+
+       switch (attr) {
+       case hwmon_temp_min_alarm:
+               bit = channel ? ADT7411_STAT_1_EXT_TEMP_LOW
+                             : ADT7411_STAT_1_INT_TEMP_LOW;
+               break;
+       case hwmon_temp_max_alarm:
+               bit = channel ? ADT7411_STAT_1_EXT_TEMP_HIGH_AIN1
+                             : ADT7411_STAT_1_INT_TEMP_HIGH;
+               break;
+       case hwmon_temp_fault:
+               bit = ADT7411_STAT_1_EXT_TEMP_FAULT;
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       *val = !!(ret & bit);
+       return 0;
+}
+
 static int adt7411_read_temp(struct device *dev, u32 attr, int channel,
                             long *val)
 {
        struct adt7411_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
-       int ret, regl, regh;
+       int ret, reg, regl, regh;
 
        switch (attr) {
        case hwmon_temp_input:
@@ -261,6 +397,21 @@ static int adt7411_read_temp(struct device *dev, u32 attr, int channel,
                ret = ret & 0x200 ? ret - 0x400 : ret; /* 10 bit signed */
                *val = ret * 250;
                return 0;
+       case hwmon_temp_min:
+       case hwmon_temp_max:
+               reg = (attr == hwmon_temp_min)
+                       ? ADT7411_REG_TEMP_LOW(channel)
+                       : ADT7411_REG_TEMP_HIGH(channel);
+               ret = i2c_smbus_read_byte_data(client, reg);
+               if (ret < 0)
+                       return ret;
+               ret = ret & 0x80 ? ret - 0x100 : ret; /* 8 bit signed */
+               *val = ret * 1000;
+               return 0;
+       case hwmon_temp_min_alarm:
+       case hwmon_temp_max_alarm:
+       case hwmon_temp_fault:
+               return adt7411_read_temp_alarm(dev, attr, channel, val);
        default:
                return -EOPNOTSUPP;
        }
@@ -279,26 +430,143 @@ static int adt7411_read(struct device *dev, enum hwmon_sensor_types type,
        }
 }
 
+static int adt7411_write_in_vdd(struct device *dev, u32 attr, long val)
+{
+       struct adt7411_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
+       int reg;
+
+       val = clamp_val(val, 0, 255 * 7000 / 256);
+       val = DIV_ROUND_CLOSEST(val * 256, 7000);
+
+       switch (attr) {
+       case hwmon_in_min:
+               reg = ADT7411_REG_VDD_LOW;
+               break;
+       case hwmon_in_max:
+               reg = ADT7411_REG_VDD_HIGH;
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return i2c_smbus_write_byte_data(client, reg, val);
+}
+
+static int adt7411_write_in_chan(struct device *dev, u32 attr, int channel,
+                                long val)
+{
+       struct adt7411_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
+       int ret, reg;
+
+       mutex_lock(&data->update_lock);
+       ret = adt7411_update_vref(dev);
+       if (ret < 0)
+               goto exit_unlock;
+       val = clamp_val(val, 0, 255 * data->vref_cached / 256);
+       val = DIV_ROUND_CLOSEST(val * 256, data->vref_cached);
+
+       switch (attr) {
+       case hwmon_in_min:
+               reg = ADT7411_REG_IN_LOW(channel);
+               break;
+       case hwmon_in_max:
+               reg = ADT7411_REG_IN_HIGH(channel);
+               break;
+       default:
+               ret = -EOPNOTSUPP;
+               goto exit_unlock;
+       }
+
+       ret = i2c_smbus_write_byte_data(client, reg, val);
+ exit_unlock:
+       mutex_unlock(&data->update_lock);
+       return ret;
+}
+
+static int adt7411_write_in(struct device *dev, u32 attr, int channel,
+                           long val)
+{
+       if (channel == 0)
+               return adt7411_write_in_vdd(dev, attr, val);
+       else
+               return adt7411_write_in_chan(dev, attr, channel, val);
+}
+
+static int adt7411_write_temp(struct device *dev, u32 attr, int channel,
+                             long val)
+{
+       struct adt7411_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
+       int reg;
+
+       val = clamp_val(val, -128000, 127000);
+       val = DIV_ROUND_CLOSEST(val, 1000);
+
+       switch (attr) {
+       case hwmon_temp_min:
+               reg = ADT7411_REG_TEMP_LOW(channel);
+               break;
+       case hwmon_temp_max:
+               reg = ADT7411_REG_TEMP_HIGH(channel);
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return i2c_smbus_write_byte_data(client, reg, val);
+}
+
+static int adt7411_write(struct device *dev, enum hwmon_sensor_types type,
+                        u32 attr, int channel, long val)
+{
+       switch (type) {
+       case hwmon_in:
+               return adt7411_write_in(dev, attr, channel, val);
+       case hwmon_temp:
+               return adt7411_write_temp(dev, attr, channel, val);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
 static umode_t adt7411_is_visible(const void *_data,
                                  enum hwmon_sensor_types type,
                                  u32 attr, int channel)
 {
        const struct adt7411_data *data = _data;
+       bool visible;
 
        switch (type) {
        case hwmon_in:
-               if (channel > 0 && channel < 3)
-                       return data->use_ext_temp ? 0 : S_IRUGO;
-               else
-                       return S_IRUGO;
+               visible = channel == 0 || channel >= 3 || !data->use_ext_temp;
+               switch (attr) {
+               case hwmon_in_input:
+               case hwmon_in_alarm:
+                       return visible ? S_IRUGO : 0;
+               case hwmon_in_min:
+               case hwmon_in_max:
+                       return visible ? S_IRUGO | S_IWUSR : 0;
+               }
+               break;
        case hwmon_temp:
-               if (channel == 1)
-                       return data->use_ext_temp ? S_IRUGO : 0;
-               else
-                       return S_IRUGO;
+               visible = channel == 0 || data->use_ext_temp;
+               switch (attr) {
+               case hwmon_temp_input:
+               case hwmon_temp_min_alarm:
+               case hwmon_temp_max_alarm:
+               case hwmon_temp_fault:
+                       return visible ? S_IRUGO : 0;
+               case hwmon_temp_min:
+               case hwmon_temp_max:
+                       return visible ? S_IRUGO | S_IWUSR : 0;
+               }
+               break;
        default:
-               return 0;
+               break;
        }
+       return 0;
 }
 
 static int adt7411_detect(struct i2c_client *client,
@@ -372,15 +640,15 @@ static int adt7411_init_device(struct adt7411_data *data)
 }
 
 static const u32 adt7411_in_config[] = {
-       HWMON_I_INPUT,
-       HWMON_I_INPUT,
-       HWMON_I_INPUT,
-       HWMON_I_INPUT,
-       HWMON_I_INPUT,
-       HWMON_I_INPUT,
-       HWMON_I_INPUT,
-       HWMON_I_INPUT,
-       HWMON_I_INPUT,
+       HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+       HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+       HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+       HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+       HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+       HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+       HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+       HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+       HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
        0
 };
 
@@ -390,8 +658,10 @@ static const struct hwmon_channel_info adt7411_in = {
 };
 
 static const u32 adt7411_temp_config[] = {
-       HWMON_T_INPUT,
-       HWMON_T_INPUT,
+       HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MIN_ALARM |
+               HWMON_T_MAX | HWMON_T_MAX_ALARM,
+       HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MIN_ALARM |
+               HWMON_T_MAX | HWMON_T_MAX_ALARM | HWMON_T_FAULT,
        0
 };
 
@@ -409,6 +679,7 @@ static const struct hwmon_channel_info *adt7411_info[] = {
 static const struct hwmon_ops adt7411_hwmon_ops = {
        .is_visible = adt7411_is_visible,
        .read = adt7411_read,
+       .write = adt7411_write,
 };
 
 static const struct hwmon_chip_info adt7411_chip_info = {
index c9a1d9c25572a466dab697a01e9b7c2a00808702..2cd920751441d3f23b1829c1a5dc1c77c3fdedfd 100644 (file)
@@ -403,7 +403,7 @@ out:
        return data;
 }
 
-static ssize_t show_auto_update_interval(struct device *dev,
+static ssize_t auto_update_interval_show(struct device *dev,
                                         struct device_attribute *devattr,
                                         char *buf)
 {
@@ -411,10 +411,9 @@ static ssize_t show_auto_update_interval(struct device *dev,
        return sprintf(buf, "%d\n", data->auto_update_interval);
 }
 
-static ssize_t set_auto_update_interval(struct device *dev,
-                                       struct device_attribute *devattr,
-                                       const char *buf,
-                                       size_t count)
+static ssize_t auto_update_interval_store(struct device *dev,
+                                         struct device_attribute *devattr,
+                                         const char *buf, size_t count)
 {
        struct adt7470_data *data = dev_get_drvdata(dev);
        long temp;
@@ -431,7 +430,7 @@ static ssize_t set_auto_update_interval(struct device *dev,
        return count;
 }
 
-static ssize_t show_num_temp_sensors(struct device *dev,
+static ssize_t num_temp_sensors_show(struct device *dev,
                                     struct device_attribute *devattr,
                                     char *buf)
 {
@@ -439,10 +438,9 @@ static ssize_t show_num_temp_sensors(struct device *dev,
        return sprintf(buf, "%d\n", data->num_temp_sensors);
 }
 
-static ssize_t set_num_temp_sensors(struct device *dev,
-                                   struct device_attribute *devattr,
-                                   const char *buf,
-                                   size_t count)
+static ssize_t num_temp_sensors_store(struct device *dev,
+                                     struct device_attribute *devattr,
+                                     const char *buf, size_t count)
 {
        struct adt7470_data *data = dev_get_drvdata(dev);
        long temp;
@@ -537,7 +535,7 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
        return sprintf(buf, "%d\n", 1000 * data->temp[attr->index]);
 }
 
-static ssize_t show_alarm_mask(struct device *dev,
+static ssize_t alarm_mask_show(struct device *dev,
                           struct device_attribute *devattr,
                           char *buf)
 {
@@ -546,10 +544,9 @@ static ssize_t show_alarm_mask(struct device *dev,
        return sprintf(buf, "%x\n", data->alarms_mask);
 }
 
-static ssize_t set_alarm_mask(struct device *dev,
-                             struct device_attribute *devattr,
-                             const char *buf,
-                             size_t count)
+static ssize_t alarm_mask_store(struct device *dev,
+                               struct device_attribute *devattr,
+                               const char *buf, size_t count)
 {
        struct adt7470_data *data = dev_get_drvdata(dev);
        long mask;
@@ -723,8 +720,8 @@ static const int adt7470_freq_map[] = {
        11, 15, 22, 29, 35, 44, 59, 88, 1400, 22500
 };
 
-static ssize_t show_pwm_freq(struct device *dev,
-                            struct device_attribute *devattr, char *buf)
+static ssize_t pwm1_freq_show(struct device *dev,
+                             struct device_attribute *devattr, char *buf)
 {
        struct adt7470_data *data = adt7470_update_device(dev);
        unsigned char cfg_reg_1;
@@ -745,9 +742,9 @@ static ssize_t show_pwm_freq(struct device *dev,
        return scnprintf(buf, PAGE_SIZE, "%d\n", adt7470_freq_map[index]);
 }
 
-static ssize_t set_pwm_freq(struct device *dev,
-                           struct device_attribute *devattr,
-                           const char *buf, size_t count)
+static ssize_t pwm1_freq_store(struct device *dev,
+                              struct device_attribute *devattr,
+                              const char *buf, size_t count)
 {
        struct adt7470_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
@@ -1012,12 +1009,9 @@ static ssize_t show_alarm(struct device *dev,
                return sprintf(buf, "0\n");
 }
 
-static DEVICE_ATTR(alarm_mask, S_IWUSR | S_IRUGO, show_alarm_mask,
-                  set_alarm_mask);
-static DEVICE_ATTR(num_temp_sensors, S_IWUSR | S_IRUGO, show_num_temp_sensors,
-                  set_num_temp_sensors);
-static DEVICE_ATTR(auto_update_interval, S_IWUSR | S_IRUGO,
-                  show_auto_update_interval, set_auto_update_interval);
+static DEVICE_ATTR_RW(alarm_mask);
+static DEVICE_ATTR_RW(num_temp_sensors);
+static DEVICE_ATTR_RW(auto_update_interval);
 
 static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_max,
                    set_temp_max, 0);
@@ -1133,7 +1127,7 @@ static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 1);
 static SENSOR_DEVICE_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 2);
 static SENSOR_DEVICE_ATTR(pwm4, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 3);
 
-static DEVICE_ATTR(pwm1_freq, S_IWUSR | S_IRUGO, show_pwm_freq, set_pwm_freq);
+static DEVICE_ATTR_RW(pwm1_freq);
 
 static SENSOR_DEVICE_ATTR(pwm1_auto_point1_pwm, S_IWUSR | S_IRUGO,
                    show_pwm_min, set_pwm_min, 0);
index 3cefd1aeb24f4a6c73f4bffd64fe7fc5b1edea18..c646670b9ea9269b95d6a37f26daeb186a939833 100644 (file)
@@ -856,16 +856,17 @@ static ssize_t set_pwmfreq(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static ssize_t show_pwm_at_crit(struct device *dev,
-                               struct device_attribute *devattr, char *buf)
+static ssize_t pwm_use_point2_pwm_at_crit_show(struct device *dev,
+                                       struct device_attribute *devattr,
+                                       char *buf)
 {
        struct adt7475_data *data = adt7475_update_device(dev);
        return sprintf(buf, "%d\n", !!(data->config4 & CONFIG4_MAXDUTY));
 }
 
-static ssize_t set_pwm_at_crit(struct device *dev,
-                              struct device_attribute *devattr,
-                              const char *buf, size_t count)
+static ssize_t pwm_use_point2_pwm_at_crit_store(struct device *dev,
+                                       struct device_attribute *devattr,
+                                       const char *buf, size_t count)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct adt7475_data *data = i2c_get_clientdata(client);
@@ -888,15 +889,15 @@ static ssize_t set_pwm_at_crit(struct device *dev,
        return count;
 }
 
-static ssize_t show_vrm(struct device *dev, struct device_attribute *devattr,
+static ssize_t vrm_show(struct device *dev, struct device_attribute *devattr,
                        char *buf)
 {
        struct adt7475_data *data = dev_get_drvdata(dev);
        return sprintf(buf, "%d\n", (int)data->vrm);
 }
 
-static ssize_t set_vrm(struct device *dev, struct device_attribute *devattr,
-                      const char *buf, size_t count)
+static ssize_t vrm_store(struct device *dev, struct device_attribute *devattr,
+                        const char *buf, size_t count)
 {
        struct adt7475_data *data = dev_get_drvdata(dev);
        long val;
@@ -910,8 +911,8 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *devattr,
        return count;
 }
 
-static ssize_t show_vid(struct device *dev, struct device_attribute *devattr,
-                       char *buf)
+static ssize_t cpu0_vid_show(struct device *dev,
+                            struct device_attribute *devattr, char *buf)
 {
        struct adt7475_data *data = adt7475_update_device(dev);
        return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
@@ -1057,11 +1058,10 @@ static SENSOR_DEVICE_ATTR_2(pwm3_auto_point2_pwm, S_IRUGO | S_IWUSR, show_pwm,
                            set_pwm, MAX, 2);
 
 /* Non-standard name, might need revisiting */
-static DEVICE_ATTR(pwm_use_point2_pwm_at_crit, S_IWUSR | S_IRUGO,
-                  show_pwm_at_crit, set_pwm_at_crit);
+static DEVICE_ATTR_RW(pwm_use_point2_pwm_at_crit);
 
-static DEVICE_ATTR(vrm, S_IWUSR | S_IRUGO, show_vrm, set_vrm);
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
+static DEVICE_ATTR_RW(vrm);
+static DEVICE_ATTR_RO(cpu0_vid);
 
 static struct attribute *adt7475_attrs[] = {
        &sensor_dev_attr_in1_input.dev_attr.attr,
index 98141f4831651a216230e40e27bd284cf7fff8b9..0f538f8be6bfb86e1c5e92835315586476fdeb3f 100644 (file)
@@ -331,9 +331,8 @@ static ssize_t adt7x10_show_alarm(struct device *dev,
        return sprintf(buf, "%d\n", !!(ret & attr->index));
 }
 
-static ssize_t adt7x10_show_name(struct device *dev,
-                                struct device_attribute *da,
-                                char *buf)
+static ssize_t name_show(struct device *dev, struct device_attribute *da,
+                        char *buf)
 {
        struct adt7x10_data *data = dev_get_drvdata(dev);
 
@@ -359,7 +358,7 @@ static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, adt7x10_show_alarm,
                          NULL, ADT7X10_STAT_T_HIGH);
 static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, adt7x10_show_alarm,
                          NULL, ADT7X10_STAT_T_CRIT);
-static DEVICE_ATTR(name, S_IRUGO, adt7x10_show_name, NULL);
+static DEVICE_ATTR_RO(name);
 
 static struct attribute *adt7x10_attributes[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
index 272fcc837ecc0ad62e3b5dc3b40aefc4c8472bec..62e19131113942ab797cd8f0c7658102758aff98 100644 (file)
@@ -483,25 +483,25 @@ sysfs_temp(3);
 sysfs_temp(4);
 
 /* VID */
-static ssize_t show_vid(struct device *dev, struct device_attribute *attr,
-               char *buf)
+static ssize_t cpu0_vid_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
 {
        struct asb100_data *data = asb100_update_device(dev);
        return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
 }
 
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
 
 /* VRM */
-static ssize_t show_vrm(struct device *dev, struct device_attribute *attr,
+static ssize_t vrm_show(struct device *dev, struct device_attribute *attr,
                char *buf)
 {
        struct asb100_data *data = dev_get_drvdata(dev);
        return sprintf(buf, "%d\n", data->vrm);
 }
 
-static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
-               const char *buf, size_t count)
+static ssize_t vrm_store(struct device *dev, struct device_attribute *attr,
+                        const char *buf, size_t count)
 {
        struct asb100_data *data = dev_get_drvdata(dev);
        unsigned long val;
@@ -519,16 +519,16 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
 }
 
 /* Alarms */
-static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm, set_vrm);
+static DEVICE_ATTR_RW(vrm);
 
-static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
+static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
                char *buf)
 {
        struct asb100_data *data = asb100_update_device(dev);
        return sprintf(buf, "%u\n", data->alarms);
 }
 
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
 
 static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
                char *buf)
@@ -550,15 +550,15 @@ static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5);
 static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13);
 
 /* 1 PWM */
-static ssize_t show_pwm1(struct device *dev, struct device_attribute *attr,
+static ssize_t pwm1_show(struct device *dev, struct device_attribute *attr,
                char *buf)
 {
        struct asb100_data *data = asb100_update_device(dev);
        return sprintf(buf, "%d\n", ASB100_PWM_FROM_REG(data->pwm & 0x0f));
 }
 
-static ssize_t set_pwm1(struct device *dev, struct device_attribute *attr,
-               const char *buf, size_t count)
+static ssize_t pwm1_store(struct device *dev, struct device_attribute *attr,
+                         const char *buf, size_t count)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct asb100_data *data = i2c_get_clientdata(client);
@@ -577,15 +577,16 @@ static ssize_t set_pwm1(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static ssize_t show_pwm_enable1(struct device *dev,
+static ssize_t pwm1_enable_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
        struct asb100_data *data = asb100_update_device(dev);
        return sprintf(buf, "%d\n", (data->pwm & 0x80) ? 1 : 0);
 }
 
-static ssize_t set_pwm_enable1(struct device *dev,
-               struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t pwm1_enable_store(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t count)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct asb100_data *data = i2c_get_clientdata(client);
@@ -604,9 +605,8 @@ static ssize_t set_pwm_enable1(struct device *dev,
        return count;
 }
 
-static DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_pwm1, set_pwm1);
-static DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
-               show_pwm_enable1, set_pwm_enable1);
+static DEVICE_ATTR_RW(pwm1);
+static DEVICE_ATTR_RW(pwm1_enable);
 
 static struct attribute *asb100_attributes[] = {
        &sensor_dev_attr_in0_input.dev_attr.attr,
index f2f2f2fc755a25b27303f4eeff6dac1130fe159c..b7eadb54c8cbedb52ff5c6aebf6b76ec8293fa42 100644 (file)
@@ -81,8 +81,8 @@ static struct atxp1_data *atxp1_update_device(struct device *dev)
 }
 
 /* sys file functions for cpu0_vid */
-static ssize_t atxp1_showvcore(struct device *dev,
-                              struct device_attribute *attr, char *buf)
+static ssize_t cpu0_vid_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
 {
        int size;
        struct atxp1_data *data;
@@ -95,9 +95,9 @@ static ssize_t atxp1_showvcore(struct device *dev,
        return size;
 }
 
-static ssize_t atxp1_storevcore(struct device *dev,
-                               struct device_attribute *attr,
-                               const char *buf, size_t count)
+static ssize_t cpu0_vid_store(struct device *dev,
+                             struct device_attribute *attr, const char *buf,
+                             size_t count)
 {
        struct atxp1_data *data = atxp1_update_device(dev);
        struct i2c_client *client = data->client;
@@ -154,12 +154,11 @@ static ssize_t atxp1_storevcore(struct device *dev,
  * CPU core reference voltage
  * unit: millivolt
  */
-static DEVICE_ATTR(cpu0_vid, S_IRUGO | S_IWUSR, atxp1_showvcore,
-                  atxp1_storevcore);
+static DEVICE_ATTR_RW(cpu0_vid);
 
 /* sys file functions for GPIO1 */
-static ssize_t atxp1_showgpio1(struct device *dev,
-                              struct device_attribute *attr, char *buf)
+static ssize_t gpio1_show(struct device *dev, struct device_attribute *attr,
+                         char *buf)
 {
        int size;
        struct atxp1_data *data;
@@ -171,9 +170,8 @@ static ssize_t atxp1_showgpio1(struct device *dev,
        return size;
 }
 
-static ssize_t atxp1_storegpio1(struct device *dev,
-                               struct device_attribute *attr, const char *buf,
-                               size_t count)
+static ssize_t gpio1_store(struct device *dev, struct device_attribute *attr,
+                          const char *buf, size_t count)
 {
        struct atxp1_data *data = atxp1_update_device(dev);
        struct i2c_client *client = data->client;
@@ -201,11 +199,11 @@ static ssize_t atxp1_storegpio1(struct device *dev,
  * GPIO1 data register
  * unit: Four bit as hex (e.g. 0x0f)
  */
-static DEVICE_ATTR(gpio1, S_IRUGO | S_IWUSR, atxp1_showgpio1, atxp1_storegpio1);
+static DEVICE_ATTR_RW(gpio1);
 
 /* sys file functions for GPIO2 */
-static ssize_t atxp1_showgpio2(struct device *dev,
-                              struct device_attribute *attr, char *buf)
+static ssize_t gpio2_show(struct device *dev, struct device_attribute *attr,
+                         char *buf)
 {
        int size;
        struct atxp1_data *data;
@@ -217,9 +215,8 @@ static ssize_t atxp1_showgpio2(struct device *dev,
        return size;
 }
 
-static ssize_t atxp1_storegpio2(struct device *dev,
-                               struct device_attribute *attr,
-                               const char *buf, size_t count)
+static ssize_t gpio2_store(struct device *dev, struct device_attribute *attr,
+                          const char *buf, size_t count)
 {
        struct atxp1_data *data = atxp1_update_device(dev);
        struct i2c_client *client = data->client;
@@ -246,7 +243,7 @@ static ssize_t atxp1_storegpio2(struct device *dev,
  * GPIO2 data register
  * unit: Eight bit as hex (e.g. 0xff)
  */
-static DEVICE_ATTR(gpio2, S_IRUGO | S_IWUSR, atxp1_showgpio2, atxp1_storegpio2);
+static DEVICE_ATTR_RW(gpio2);
 
 static struct attribute *atxp1_attrs[] = {
        &dev_attr_gpio1.attr,
index 8763c4a8280ce753282f9e147f910aa975841efc..aa40a00ad689b72d02b289e4b2aa636f25561c48 100644 (file)
@@ -279,7 +279,8 @@ static inline int IN_FROM_REG(int reg, int nominal, int res)
 
 static inline int IN_TO_REG(long val, int nominal)
 {
-       return clamp_val((val * 192 + nominal / 2) / nominal, 0, 255);
+       val = clamp_val(val, 0, 255 * nominal / 192);
+       return DIV_ROUND_CLOSEST(val * 192, nominal);
 }
 
 /*
@@ -295,7 +296,8 @@ static inline int TEMP_FROM_REG(int reg, int res)
 
 static inline int TEMP_TO_REG(long val)
 {
-       return clamp_val((val < 0 ? val - 500 : val + 500) / 1000, -128, 127);
+       val = clamp_val(val, -128000, 127000);
+       return DIV_ROUND_CLOSEST(val, 1000);
 }
 
 /* Temperature range */
@@ -331,9 +333,10 @@ static inline int TEMP_HYST_FROM_REG(int reg, int ix)
        return (((ix == 1) ? reg : reg >> 4) & 0x0f) * 1000;
 }
 
-static inline int TEMP_HYST_TO_REG(long val, int ix, int reg)
+static inline int TEMP_HYST_TO_REG(int temp, long hyst, int ix, int reg)
 {
-       int hyst = clamp_val((val + 500) / 1000, 0, 15);
+       hyst = clamp_val(hyst, temp - 15000, temp);
+       hyst = DIV_ROUND_CLOSEST(temp - hyst, 1000);
 
        return (ix == 1) ? (reg & 0xf0) | hyst : (reg & 0x0f) | (hyst << 4);
 }
@@ -1022,7 +1025,9 @@ static ssize_t set_zone(struct device *dev, struct device_attribute *attr,
        int ix = sensor_attr_2->index;
        int fn = sensor_attr_2->nr;
        long val;
+       int temp;
        int err;
+       u8 reg;
 
        err = kstrtol(buf, 10, &val);
        if (err)
@@ -1035,10 +1040,9 @@ static ssize_t set_zone(struct device *dev, struct device_attribute *attr,
                data->zone_low[ix] = dme1737_read(data,
                                                  DME1737_REG_ZONE_LOW(ix));
                /* Modify the temp hyst value */
-               data->zone_hyst[ix == 2] = TEMP_HYST_TO_REG(
-                                       TEMP_FROM_REG(data->zone_low[ix], 8) -
-                                       val, ix, dme1737_read(data,
-                                       DME1737_REG_ZONE_HYST(ix == 2)));
+               temp = TEMP_FROM_REG(data->zone_low[ix], 8);
+               reg = dme1737_read(data, DME1737_REG_ZONE_HYST(ix == 2));
+               data->zone_hyst[ix == 2] = TEMP_HYST_TO_REG(temp, val, ix, reg);
                dme1737_write(data, DME1737_REG_ZONE_HYST(ix == 2),
                              data->zone_hyst[ix == 2]);
                break;
@@ -1055,10 +1059,10 @@ static ssize_t set_zone(struct device *dev, struct device_attribute *attr,
                 * Modify the temp range value (which is stored in the upper
                 * nibble of the pwm_freq register)
                 */
-               data->pwm_freq[ix] = TEMP_RANGE_TO_REG(val -
-                                       TEMP_FROM_REG(data->zone_low[ix], 8),
-                                       dme1737_read(data,
-                                       DME1737_REG_PWM_FREQ(ix)));
+               temp = TEMP_FROM_REG(data->zone_low[ix], 8);
+               val = clamp_val(val, temp, temp + 80000);
+               reg = dme1737_read(data, DME1737_REG_PWM_FREQ(ix));
+               data->pwm_freq[ix] = TEMP_RANGE_TO_REG(val - temp, reg);
                dme1737_write(data, DME1737_REG_PWM_FREQ(ix),
                              data->pwm_freq[ix]);
                break;
@@ -1468,7 +1472,7 @@ exit:
  * Miscellaneous sysfs attributes
  * --------------------------------------------------------------------- */
 
-static ssize_t show_vrm(struct device *dev, struct device_attribute *attr,
+static ssize_t vrm_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
 {
        struct i2c_client *client = to_i2c_client(dev);
@@ -1477,8 +1481,8 @@ static ssize_t show_vrm(struct device *dev, struct device_attribute *attr,
        return sprintf(buf, "%d\n", data->vrm);
 }
 
-static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
-                      const char *buf, size_t count)
+static ssize_t vrm_store(struct device *dev, struct device_attribute *attr,
+                        const char *buf, size_t count)
 {
        struct dme1737_data *data = dev_get_drvdata(dev);
        unsigned long val;
@@ -1495,15 +1499,15 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static ssize_t show_vid(struct device *dev, struct device_attribute *attr,
-                       char *buf)
+static ssize_t cpu0_vid_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
 {
        struct dme1737_data *data = dme1737_update_device(dev);
 
        return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
 }
 
-static ssize_t show_name(struct device *dev, struct device_attribute *attr,
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
 {
        struct dme1737_data *data = dev_get_drvdata(dev);
@@ -1645,9 +1649,9 @@ SENSOR_DEVICE_ATTR_PWM_5TO6(6);
 
 /* Misc */
 
-static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm, set_vrm);
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);   /* for ISA devices */
+static DEVICE_ATTR_RW(vrm);
+static DEVICE_ATTR_RO(cpu0_vid);
+static DEVICE_ATTR_RO(name);   /* for ISA devices */
 
 /*
  * This struct holds all the attributes that are always present and need to be
index 8890870309e4db7f5f875c202756006b3323538b..5c317fc32a4aaa2b351d7b15b4be531de7347a64 100644 (file)
@@ -263,7 +263,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
        return count;
 }
 
-static ssize_t show_alarms(struct device *dev, struct device_attribute *da,
+static ssize_t alarms_show(struct device *dev, struct device_attribute *da,
                           char *buf)
 {
        struct ds1621_data *data = ds1621_update_client(dev);
@@ -278,15 +278,16 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute *da,
        return sprintf(buf, "%d\n", !!(data->conf & attr->index));
 }
 
-static ssize_t show_convrate(struct device *dev, struct device_attribute *da,
-                         char *buf)
+static ssize_t update_interval_show(struct device *dev,
+                                   struct device_attribute *da, char *buf)
 {
        struct ds1621_data *data = dev_get_drvdata(dev);
        return scnprintf(buf, PAGE_SIZE, "%hu\n", data->update_interval);
 }
 
-static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
-                           const char *buf, size_t count)
+static ssize_t update_interval_store(struct device *dev,
+                                    struct device_attribute *da,
+                                    const char *buf, size_t count)
 {
        struct ds1621_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
@@ -315,9 +316,8 @@ static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
        return count;
 }
 
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
-static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_convrate,
-                  set_convrate);
+static DEVICE_ATTR_RO(alarms);
+static DEVICE_ATTR_RW(update_interval);
 
 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
 static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp, set_temp, 1);
index 4b870ee9b0d3d934748632d4d33309f05d0d5983..1ed9a7aa953dbb1560ae1fb626b2d529f6d2ecc9 100644 (file)
@@ -284,7 +284,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *da,
 }
 
 static ssize_t
-show_fan(struct device *dev, struct device_attribute *da, char *buf)
+fan1_input_show(struct device *dev, struct device_attribute *da, char *buf)
 {
        struct emc2103_data *data = emc2103_update_device(dev);
        int rpm = 0;
@@ -294,7 +294,7 @@ show_fan(struct device *dev, struct device_attribute *da, char *buf)
 }
 
 static ssize_t
-show_fan_div(struct device *dev, struct device_attribute *da, char *buf)
+fan1_div_show(struct device *dev, struct device_attribute *da, char *buf)
 {
        struct emc2103_data *data = emc2103_update_device(dev);
        int fan_div = 8 / data->fan_multiplier;
@@ -307,8 +307,8 @@ show_fan_div(struct device *dev, struct device_attribute *da, char *buf)
  * of least surprise; the user doesn't expect the fan target to change just
  * because the divider changed.
  */
-static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
-                          const char *buf, size_t count)
+static ssize_t fan1_div_store(struct device *dev, struct device_attribute *da,
+                             const char *buf, size_t count)
 {
        struct emc2103_data *data = emc2103_update_device(dev);
        struct i2c_client *client = data->client;
@@ -369,7 +369,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
 }
 
 static ssize_t
-show_fan_target(struct device *dev, struct device_attribute *da, char *buf)
+fan1_target_show(struct device *dev, struct device_attribute *da, char *buf)
 {
        struct emc2103_data *data = emc2103_update_device(dev);
        int rpm = 0;
@@ -382,8 +382,9 @@ show_fan_target(struct device *dev, struct device_attribute *da, char *buf)
        return sprintf(buf, "%d\n", rpm);
 }
 
-static ssize_t set_fan_target(struct device *dev, struct device_attribute *da,
-                             const char *buf, size_t count)
+static ssize_t fan1_target_store(struct device *dev,
+                                struct device_attribute *da, const char *buf,
+                                size_t count)
 {
        struct emc2103_data *data = emc2103_update_device(dev);
        struct i2c_client *client = data->client;
@@ -412,7 +413,7 @@ static ssize_t set_fan_target(struct device *dev, struct device_attribute *da,
 }
 
 static ssize_t
-show_fan_fault(struct device *dev, struct device_attribute *da, char *buf)
+fan1_fault_show(struct device *dev, struct device_attribute *da, char *buf)
 {
        struct emc2103_data *data = emc2103_update_device(dev);
        bool fault = ((data->fan_tach & 0x1fe0) == 0x1fe0);
@@ -420,14 +421,15 @@ show_fan_fault(struct device *dev, struct device_attribute *da, char *buf)
 }
 
 static ssize_t
-show_pwm_enable(struct device *dev, struct device_attribute *da, char *buf)
+pwm1_enable_show(struct device *dev, struct device_attribute *da, char *buf)
 {
        struct emc2103_data *data = emc2103_update_device(dev);
        return sprintf(buf, "%d\n", data->fan_rpm_control ? 3 : 0);
 }
 
-static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da,
-                             const char *buf, size_t count)
+static ssize_t pwm1_enable_store(struct device *dev,
+                                struct device_attribute *da, const char *buf,
+                                size_t count)
 {
        struct emc2103_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
@@ -512,14 +514,12 @@ static SENSOR_DEVICE_ATTR(temp4_min_alarm, S_IRUGO, show_temp_min_alarm,
 static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_temp_max_alarm,
        NULL, 3);
 
-static DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL);
-static DEVICE_ATTR(fan1_div, S_IRUGO | S_IWUSR, show_fan_div, set_fan_div);
-static DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, show_fan_target,
-       set_fan_target);
-static DEVICE_ATTR(fan1_fault, S_IRUGO, show_fan_fault, NULL);
+static DEVICE_ATTR_RO(fan1_input);
+static DEVICE_ATTR_RW(fan1_div);
+static DEVICE_ATTR_RW(fan1_target);
+static DEVICE_ATTR_RO(fan1_fault);
 
-static DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, show_pwm_enable,
-       set_pwm_enable);
+static DEVICE_ATTR_RW(pwm1_enable);
 
 /* sensors present on all models */
 static struct attribute *emc2103_attributes[] = {
index facd05cda26da38dedd491d787dd7aff3fc02281..73c681162653bf5ff5be48b3d0f1d327f1511f68 100644 (file)
@@ -946,7 +946,7 @@ static ssize_t set_temp_hyst(struct device *dev, struct device_attribute
        return count;
 }
 
-static ssize_t show_alarms_in(struct device *dev, struct device_attribute
+static ssize_t alarms_in_show(struct device *dev, struct device_attribute
                              *devattr, char *buf)
 {
        struct f71805f_data *data = f71805f_update_device(dev);
@@ -954,7 +954,7 @@ static ssize_t show_alarms_in(struct device *dev, struct device_attribute
        return sprintf(buf, "%lu\n", data->alarms & 0x7ff);
 }
 
-static ssize_t show_alarms_fan(struct device *dev, struct device_attribute
+static ssize_t alarms_fan_show(struct device *dev, struct device_attribute
                               *devattr, char *buf)
 {
        struct f71805f_data *data = f71805f_update_device(dev);
@@ -962,7 +962,7 @@ static ssize_t show_alarms_fan(struct device *dev, struct device_attribute
        return sprintf(buf, "%lu\n", (data->alarms >> 16) & 0x07);
 }
 
-static ssize_t show_alarms_temp(struct device *dev, struct device_attribute
+static ssize_t alarms_temp_show(struct device *dev, struct device_attribute
                                *devattr, char *buf)
 {
        struct f71805f_data *data = f71805f_update_device(dev);
@@ -980,7 +980,7 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute
        return sprintf(buf, "%lu\n", (data->alarms >> bitnr) & 1);
 }
 
-static ssize_t show_name(struct device *dev, struct device_attribute
+static ssize_t name_show(struct device *dev, struct device_attribute
                         *devattr, char *buf)
 {
        struct f71805f_data *data = dev_get_drvdata(dev);
@@ -1176,11 +1176,11 @@ static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13);
 static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 16);
 static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 17);
 static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 18);
-static DEVICE_ATTR(alarms_in, S_IRUGO, show_alarms_in, NULL);
-static DEVICE_ATTR(alarms_fan, S_IRUGO, show_alarms_fan, NULL);
-static DEVICE_ATTR(alarms_temp, S_IRUGO, show_alarms_temp, NULL);
+static DEVICE_ATTR_RO(alarms_in);
+static DEVICE_ATTR_RO(alarms_fan);
+static DEVICE_ATTR_RO(alarms_temp);
 
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
 
 static struct attribute *f71805f_attributes[] = {
        &sensor_dev_attr_in0_input.dev_attr.attr,
index cb28e4b4fb108567c508914fcf21bfb440c7c4da..ca54ce5c8e10c0ef04261b5ef804b56b0e888cae 100644 (file)
@@ -390,7 +390,7 @@ static ssize_t show_pwm_auto_point_temp(struct device *dev,
 static ssize_t store_pwm_auto_point_temp(struct device *dev,
        struct device_attribute *devattr, const char *buf, size_t count);
 /* Sysfs misc */
-static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
+static ssize_t name_show(struct device *dev, struct device_attribute *devattr,
        char *buf);
 
 static int f71882fg_probe(struct platform_device *pdev);
@@ -404,7 +404,7 @@ static struct platform_driver f71882fg_driver = {
        .remove         = f71882fg_remove,
 };
 
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
 
 /*
  * Temp attr for the f71858fg, the f71858fg is special as it has its
@@ -2212,7 +2212,7 @@ static ssize_t store_pwm_auto_point_temp(struct device *dev,
        return count;
 }
 
-static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
+static ssize_t name_show(struct device *dev, struct device_attribute *devattr,
        char *buf)
 {
        struct f71882fg_data *data = dev_get_drvdata(dev);
index 15aa49d082c45752fd2751f9229d16bc93196868..9545a346044fc15185f9b0352d45949b687a5fac 100644 (file)
@@ -83,8 +83,8 @@ static bool is_carrizo_or_later(void)
        return boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model >= 0x60;
 }
 
-static ssize_t show_power(struct device *dev,
-                         struct device_attribute *attr, char *buf)
+static ssize_t power1_input_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
 {
        u32 val, tdp_limit, running_avg_range;
        s32 running_avg_capture;
@@ -136,16 +136,16 @@ static ssize_t show_power(struct device *dev,
        curr_pwr_watts = (curr_pwr_watts * 15625) >> (10 + running_avg_range);
        return sprintf(buf, "%u\n", (unsigned int) curr_pwr_watts);
 }
-static DEVICE_ATTR(power1_input, S_IRUGO, show_power, NULL);
+static DEVICE_ATTR_RO(power1_input);
 
-static ssize_t show_power_crit(struct device *dev,
-                              struct device_attribute *attr, char *buf)
+static ssize_t power1_crit_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
 {
        struct fam15h_power_data *data = dev_get_drvdata(dev);
 
        return sprintf(buf, "%u\n", data->processor_pwr_watts);
 }
-static DEVICE_ATTR(power1_crit, S_IRUGO, show_power_crit, NULL);
+static DEVICE_ATTR_RO(power1_crit);
 
 static void do_read_registers_on_cu(void *_data)
 {
@@ -212,9 +212,8 @@ static int read_registers(struct fam15h_power_data *data)
        return 0;
 }
 
-static ssize_t acc_show_power(struct device *dev,
-                             struct device_attribute *attr,
-                             char *buf)
+static ssize_t power1_average_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
 {
        struct fam15h_power_data *data = dev_get_drvdata(dev);
        u64 prev_cu_acc_power[MAX_CUS], prev_ptsc[MAX_CUS],
@@ -267,20 +266,20 @@ static ssize_t acc_show_power(struct device *dev,
 
        return sprintf(buf, "%llu\n", (unsigned long long)avg_acc);
 }
-static DEVICE_ATTR(power1_average, S_IRUGO, acc_show_power, NULL);
+static DEVICE_ATTR_RO(power1_average);
 
-static ssize_t acc_show_power_period(struct device *dev,
-                                    struct device_attribute *attr,
-                                    char *buf)
+static ssize_t power1_average_interval_show(struct device *dev,
+                                           struct device_attribute *attr,
+                                           char *buf)
 {
        struct fam15h_power_data *data = dev_get_drvdata(dev);
 
        return sprintf(buf, "%lu\n", data->power_period);
 }
 
-static ssize_t acc_set_power_period(struct device *dev,
-                                   struct device_attribute *attr,
-                                   const char *buf, size_t count)
+static ssize_t power1_average_interval_store(struct device *dev,
+                                            struct device_attribute *attr,
+                                            const char *buf, size_t count)
 {
        struct fam15h_power_data *data = dev_get_drvdata(dev);
        unsigned long temp;
@@ -301,8 +300,7 @@ static ssize_t acc_set_power_period(struct device *dev,
 
        return count;
 }
-static DEVICE_ATTR(power1_average_interval, S_IRUGO | S_IWUSR,
-                  acc_show_power_period, acc_set_power_period);
+static DEVICE_ATTR_RW(power1_average_interval);
 
 static int fam15h_power_init_attrs(struct pci_dev *pdev,
                                   struct fam15h_power_data *data)
index d58abdc5a4cff74e961f79a385d439b4f02cb09b..5e78229ade049f41cc1277da5d0af85f59f45e45 100644 (file)
@@ -561,7 +561,7 @@ static ssize_t store_pwm_auto_point1_pwm(struct device *dev,
  * The FSC hwmon family has the ability to force an attached alert led to flash
  * from software, we export this as an alert_led sysfs attr
  */
-static ssize_t show_alert_led(struct device *dev,
+static ssize_t alert_led_show(struct device *dev,
        struct device_attribute *devattr, char *buf)
 {
        struct fschmd_data *data = fschmd_update_device(dev);
@@ -572,7 +572,7 @@ static ssize_t show_alert_led(struct device *dev,
                return sprintf(buf, "0\n");
 }
 
-static ssize_t store_alert_led(struct device *dev,
+static ssize_t alert_led_store(struct device *dev,
        struct device_attribute *devattr, const char *buf, size_t count)
 {
        u8 reg;
@@ -602,7 +602,7 @@ static ssize_t store_alert_led(struct device *dev,
        return count;
 }
 
-static DEVICE_ATTR(alert_led, 0644, show_alert_led, store_alert_led);
+static DEVICE_ATTR_RW(alert_led);
 
 static struct sensor_device_attribute fschmd_attr[] = {
        SENSOR_ATTR(in0_input, 0444, show_in_value, NULL, 0),
index ec6a77da411a19fac3a762522670b0b808b30e21..7be1371b2c3d3bdeddffa7fb9e4b0a89ee0f7819 100644 (file)
@@ -107,8 +107,8 @@ static struct g760a_data *g760a_update_client(struct device *dev)
        return data;
 }
 
-static ssize_t show_fan(struct device *dev, struct device_attribute *da,
-                       char *buf)
+static ssize_t fan1_input_show(struct device *dev,
+                              struct device_attribute *da, char *buf)
 {
        struct g760a_data *data = g760a_update_client(dev);
        unsigned int rpm = 0;
@@ -121,8 +121,8 @@ static ssize_t show_fan(struct device *dev, struct device_attribute *da,
        return sprintf(buf, "%d\n", rpm);
 }
 
-static ssize_t show_fan_alarm(struct device *dev, struct device_attribute *da,
-                             char *buf)
+static ssize_t fan1_alarm_show(struct device *dev,
+                              struct device_attribute *da, char *buf)
 {
        struct g760a_data *data = g760a_update_client(dev);
 
@@ -131,16 +131,16 @@ static ssize_t show_fan_alarm(struct device *dev, struct device_attribute *da,
        return sprintf(buf, "%d\n", fan_alarm);
 }
 
-static ssize_t get_pwm(struct device *dev, struct device_attribute *da,
-                      char *buf)
+static ssize_t pwm1_show(struct device *dev, struct device_attribute *da,
+                        char *buf)
 {
        struct g760a_data *data = g760a_update_client(dev);
 
        return sprintf(buf, "%d\n", PWM_FROM_CNT(data->set_cnt));
 }
 
-static ssize_t set_pwm(struct device *dev, struct device_attribute *da,
-                      const char *buf, size_t count)
+static ssize_t pwm1_store(struct device *dev, struct device_attribute *da,
+                         const char *buf, size_t count)
 {
        struct g760a_data *data = g760a_update_client(dev);
        struct i2c_client *client = data->client;
@@ -157,9 +157,9 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *da,
        return count;
 }
 
-static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, get_pwm, set_pwm);
-static DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL);
-static DEVICE_ATTR(fan1_alarm, S_IRUGO, show_fan_alarm, NULL);
+static DEVICE_ATTR_RW(pwm1);
+static DEVICE_ATTR_RO(fan1_input);
+static DEVICE_ATTR_RO(fan1_alarm);
 
 static struct attribute *g760a_attrs[] = {
        &dev_attr_pwm1.attr,
index 628be9c95ff9dbe18d846ce7e5fd3266a96ebe62..6dca2fd3d3036d9bf81bff7f794315375ec6ef67 100644 (file)
@@ -738,8 +738,8 @@ static int g762_pdata_prop_import(struct i2c_client *client)
  * Read function for fan1_input sysfs file. Return current fan RPM value, or
  * 0 if fan is out of control.
  */
-static ssize_t get_fan_rpm(struct device *dev, struct device_attribute *da,
-                          char *buf)
+static ssize_t fan1_input_show(struct device *dev,
+                              struct device_attribute *da, char *buf)
 {
        struct g762_data *data = g762_update_client(dev);
        unsigned int rpm = 0;
@@ -764,8 +764,8 @@ static ssize_t get_fan_rpm(struct device *dev, struct device_attribute *da,
  * Read and write functions for pwm1_mode sysfs file. Get and set fan speed
  * control mode i.e. PWM (1) or DC (0).
  */
-static ssize_t get_pwm_mode(struct device *dev, struct device_attribute *da,
-                           char *buf)
+static ssize_t pwm1_mode_show(struct device *dev, struct device_attribute *da,
+                             char *buf)
 {
        struct g762_data *data = g762_update_client(dev);
 
@@ -776,8 +776,9 @@ static ssize_t get_pwm_mode(struct device *dev, struct device_attribute *da,
                       !!(data->fan_cmd1 & G762_REG_FAN_CMD1_OUT_MODE));
 }
 
-static ssize_t set_pwm_mode(struct device *dev, struct device_attribute *da,
-                           const char *buf, size_t count)
+static ssize_t pwm1_mode_store(struct device *dev,
+                              struct device_attribute *da, const char *buf,
+                              size_t count)
 {
        unsigned long val;
        int ret;
@@ -796,8 +797,8 @@ static ssize_t set_pwm_mode(struct device *dev, struct device_attribute *da,
  * Read and write functions for fan1_div sysfs file. Get and set fan
  * controller prescaler value
  */
-static ssize_t get_fan_div(struct device *dev,
-                          struct device_attribute *da, char *buf)
+static ssize_t fan1_div_show(struct device *dev, struct device_attribute *da,
+                            char *buf)
 {
        struct g762_data *data = g762_update_client(dev);
 
@@ -807,9 +808,8 @@ static ssize_t get_fan_div(struct device *dev,
        return sprintf(buf, "%d\n", G762_CLKDIV_FROM_REG(data->fan_cmd1));
 }
 
-static ssize_t set_fan_div(struct device *dev,
-                          struct device_attribute *da,
-                          const char *buf, size_t count)
+static ssize_t fan1_div_store(struct device *dev, struct device_attribute *da,
+                             const char *buf, size_t count)
 {
        unsigned long val;
        int ret;
@@ -828,8 +828,8 @@ static ssize_t set_fan_div(struct device *dev,
  * Read and write functions for fan1_pulses sysfs file. Get and set number
  * of tachometer pulses per fan revolution.
  */
-static ssize_t get_fan_pulses(struct device *dev,
-                             struct device_attribute *da, char *buf)
+static ssize_t fan1_pulses_show(struct device *dev,
+                               struct device_attribute *da, char *buf)
 {
        struct g762_data *data = g762_update_client(dev);
 
@@ -839,9 +839,9 @@ static ssize_t get_fan_pulses(struct device *dev,
        return sprintf(buf, "%d\n", G762_PULSE_FROM_REG(data->fan_cmd1));
 }
 
-static ssize_t set_fan_pulses(struct device *dev,
-                             struct device_attribute *da,
-                             const char *buf, size_t count)
+static ssize_t fan1_pulses_store(struct device *dev,
+                                struct device_attribute *da, const char *buf,
+                                size_t count)
 {
        unsigned long val;
        int ret;
@@ -870,8 +870,8 @@ static ssize_t set_fan_pulses(struct device *dev,
  * but we do not accept 0 as this mode is not natively supported by the chip
  * and it is not emulated by g762 driver. -EINVAL is returned in this case.
  */
-static ssize_t get_pwm_enable(struct device *dev,
-                             struct device_attribute *da, char *buf)
+static ssize_t pwm1_enable_show(struct device *dev,
+                               struct device_attribute *da, char *buf)
 {
        struct g762_data *data = g762_update_client(dev);
 
@@ -882,9 +882,9 @@ static ssize_t get_pwm_enable(struct device *dev,
                       (!!(data->fan_cmd1 & G762_REG_FAN_CMD1_FAN_MODE)) + 1);
 }
 
-static ssize_t set_pwm_enable(struct device *dev,
-                             struct device_attribute *da,
-                             const char *buf, size_t count)
+static ssize_t pwm1_enable_store(struct device *dev,
+                                struct device_attribute *da, const char *buf,
+                                size_t count)
 {
        unsigned long val;
        int ret;
@@ -904,8 +904,8 @@ static ssize_t set_pwm_enable(struct device *dev,
  * (which affects fan speed) in open-loop mode. 0 stops the fan and 255
  * makes it run at full speed.
  */
-static ssize_t get_pwm(struct device *dev, struct device_attribute *da,
-                      char *buf)
+static ssize_t pwm1_show(struct device *dev, struct device_attribute *da,
+                        char *buf)
 {
        struct g762_data *data = g762_update_client(dev);
 
@@ -915,8 +915,8 @@ static ssize_t get_pwm(struct device *dev, struct device_attribute *da,
        return sprintf(buf, "%d\n", data->set_out);
 }
 
-static ssize_t set_pwm(struct device *dev, struct device_attribute *da,
-                      const char *buf, size_t count)
+static ssize_t pwm1_store(struct device *dev, struct device_attribute *da,
+                         const char *buf, size_t count)
 {
        unsigned long val;
        int ret;
@@ -942,8 +942,8 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *da,
  * Also note that due to rounding errors it is possible that you don't read
  * back exactly the value you have set.
  */
-static ssize_t get_fan_target(struct device *dev, struct device_attribute *da,
-                             char *buf)
+static ssize_t fan1_target_show(struct device *dev,
+                               struct device_attribute *da, char *buf)
 {
        struct g762_data *data = g762_update_client(dev);
        unsigned int rpm;
@@ -961,8 +961,9 @@ static ssize_t get_fan_target(struct device *dev, struct device_attribute *da,
        return sprintf(buf, "%u\n", rpm);
 }
 
-static ssize_t set_fan_target(struct device *dev, struct device_attribute *da,
-                             const char *buf, size_t count)
+static ssize_t fan1_target_store(struct device *dev,
+                                struct device_attribute *da, const char *buf,
+                                size_t count)
 {
        unsigned long val;
        int ret;
@@ -978,7 +979,7 @@ static ssize_t set_fan_target(struct device *dev, struct device_attribute *da,
 }
 
 /* read function for fan1_fault sysfs file. */
-static ssize_t get_fan_failure(struct device *dev, struct device_attribute *da,
+static ssize_t fan1_fault_show(struct device *dev, struct device_attribute *da,
                               char *buf)
 {
        struct g762_data *data = g762_update_client(dev);
@@ -993,8 +994,8 @@ static ssize_t get_fan_failure(struct device *dev, struct device_attribute *da,
  * read function for fan1_alarm sysfs file. Note that OOC condition is
  * enabled low
  */
-static ssize_t get_fan_ooc(struct device *dev, struct device_attribute *da,
-                          char *buf)
+static ssize_t fan1_alarm_show(struct device *dev,
+                              struct device_attribute *da, char *buf)
 {
        struct g762_data *data = g762_update_client(dev);
 
@@ -1004,18 +1005,15 @@ static ssize_t get_fan_ooc(struct device *dev, struct device_attribute *da,
        return sprintf(buf, "%u\n", !(data->fan_sta & G762_REG_FAN_STA_OOC));
 }
 
-static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, get_pwm, set_pwm);
-static DEVICE_ATTR(pwm1_mode, S_IWUSR | S_IRUGO, get_pwm_mode, set_pwm_mode);
-static DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
-                  get_pwm_enable, set_pwm_enable);
-static DEVICE_ATTR(fan1_input, S_IRUGO, get_fan_rpm, NULL);
-static DEVICE_ATTR(fan1_alarm, S_IRUGO, get_fan_ooc, NULL);
-static DEVICE_ATTR(fan1_fault, S_IRUGO, get_fan_failure, NULL);
-static DEVICE_ATTR(fan1_target, S_IWUSR | S_IRUGO,
-                  get_fan_target, set_fan_target);
-static DEVICE_ATTR(fan1_div, S_IWUSR | S_IRUGO, get_fan_div, set_fan_div);
-static DEVICE_ATTR(fan1_pulses, S_IWUSR | S_IRUGO,
-                  get_fan_pulses, set_fan_pulses);
+static DEVICE_ATTR_RW(pwm1);
+static DEVICE_ATTR_RW(pwm1_mode);
+static DEVICE_ATTR_RW(pwm1_enable);
+static DEVICE_ATTR_RO(fan1_input);
+static DEVICE_ATTR_RO(fan1_alarm);
+static DEVICE_ATTR_RO(fan1_fault);
+static DEVICE_ATTR_RW(fan1_target);
+static DEVICE_ATTR_RW(fan1_div);
+static DEVICE_ATTR_RW(fan1_pulses);
 
 /* Driver data */
 static struct attribute *g762_attrs[] = {
index 0212c8317bca5b33a7620b798e8052ff204af8af..b267510daeb2b7bc1129b68d10079a91a1b9d2cb 100644 (file)
@@ -86,9 +86,8 @@ enum chips { gl518sm_r00, gl518sm_r80 };
 #define BOOL_FROM_REG(val)     ((val) ? 0 : 1)
 #define BOOL_TO_REG(val)       ((val) ? 0 : 1)
 
-#define TEMP_TO_REG(val)       clamp_val(((((val) < 0 ? \
-                               (val) - 500 : \
-                               (val) + 500) / 1000) + 119), 0, 255)
+#define TEMP_CLAMP(val)                clamp_val(val, -119000, 136000)
+#define TEMP_TO_REG(val)       (DIV_ROUND_CLOSEST(TEMP_CLAMP(val), 1000) + 119)
 #define TEMP_FROM_REG(val)     (((val) - 119) * 1000)
 
 static inline u8 FAN_TO_REG(long rpm, int div)
@@ -101,11 +100,13 @@ static inline u8 FAN_TO_REG(long rpm, int div)
 }
 #define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : (480000 / ((val) * (div))))
 
-#define IN_TO_REG(val)         clamp_val((((val) + 9) / 19), 0, 255)
+#define IN_CLAMP(val)          clamp_val(val, 0, 255 * 19)
+#define IN_TO_REG(val)         DIV_ROUND_CLOSEST(IN_CLAMP(val), 19)
 #define IN_FROM_REG(val)       ((val) * 19)
 
-#define VDD_TO_REG(val)                clamp_val((((val) * 4 + 47) / 95), 0, 255)
-#define VDD_FROM_REG(val)      (((val) * 95 + 2) / 4)
+#define VDD_CLAMP(val)         clamp_val(val, 0, 255 * 95 / 4)
+#define VDD_TO_REG(val)                DIV_ROUND_CLOSEST(VDD_CLAMP(val) * 4, 95)
+#define VDD_FROM_REG(val)      DIV_ROUND_CLOSEST((val) * 95, 4)
 
 #define DIV_FROM_REG(val)      (1 << (val))
 
index dee93ec87d02a2c3c2f343ed6e2caeb0f7b35384..4ff32ee67fb618e34b786c365a5d26f73f649caa 100644 (file)
@@ -200,19 +200,21 @@ static struct gl520_data *gl520_update_device(struct device *dev)
  * Sysfs stuff
  */
 
-static ssize_t get_cpu_vid(struct device *dev, struct device_attribute *attr,
-                          char *buf)
+static ssize_t cpu0_vid_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
 {
        struct gl520_data *data = gl520_update_device(dev);
        return sprintf(buf, "%u\n", vid_from_reg(data->vid, data->vrm));
 }
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, get_cpu_vid, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
 
-#define VDD_FROM_REG(val) (((val) * 95 + 2) / 4)
-#define VDD_TO_REG(val) clamp_val((((val) * 4 + 47) / 95), 0, 255)
+#define VDD_FROM_REG(val)      DIV_ROUND_CLOSEST((val) * 95, 4)
+#define VDD_CLAMP(val)         clamp_val(val, 0, 255 * 95 / 4)
+#define VDD_TO_REG(val)                DIV_ROUND_CLOSEST(VDD_CLAMP(val) * 4, 95)
 
-#define IN_FROM_REG(val) ((val) * 19)
-#define IN_TO_REG(val) clamp_val((((val) + 9) / 19), 0, 255)
+#define IN_FROM_REG(val)       ((val) * 19)
+#define IN_CLAMP(val)          clamp_val(val, 0, 255 * 19)
+#define IN_TO_REG(val)         DIV_ROUND_CLOSEST(IN_CLAMP(val), 19)
 
 static ssize_t get_in_input(struct device *dev, struct device_attribute *attr,
                            char *buf)
@@ -349,8 +351,13 @@ static SENSOR_DEVICE_ATTR(in4_max, S_IRUGO | S_IWUSR,
 
 #define DIV_FROM_REG(val) (1 << (val))
 #define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : (480000 / ((val) << (div))))
-#define FAN_TO_REG(val, div) ((val) <= 0 ? 0 : \
-       clamp_val((480000 + ((val) << ((div)-1))) / ((val) << (div)), 1, 255))
+
+#define FAN_BASE(div)          (480000 >> (div))
+#define FAN_CLAMP(val, div)    clamp_val(val, FAN_BASE(div) / 255, \
+                                         FAN_BASE(div))
+#define FAN_TO_REG(val, div)   ((val) == 0 ? 0 : \
+                                DIV_ROUND_CLOSEST(480000, \
+                                               FAN_CLAMP(val, div) << (div)))
 
 static ssize_t get_fan_input(struct device *dev, struct device_attribute *attr,
                             char *buf)
@@ -381,8 +388,8 @@ static ssize_t get_fan_div(struct device *dev, struct device_attribute *attr,
        return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[n]));
 }
 
-static ssize_t get_fan_off(struct device *dev, struct device_attribute *attr,
-                          char *buf)
+static ssize_t fan1_off_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
 {
        struct gl520_data *data = gl520_update_device(dev);
        return sprintf(buf, "%d\n", data->fan_off);
@@ -476,8 +483,9 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static ssize_t set_fan_off(struct device *dev, struct device_attribute *attr,
-                          const char *buf, size_t count)
+static ssize_t fan1_off_store(struct device *dev,
+                             struct device_attribute *attr, const char *buf,
+                             size_t count)
 {
        struct gl520_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
@@ -510,12 +518,11 @@ static SENSOR_DEVICE_ATTR(fan1_div, S_IRUGO | S_IWUSR,
                get_fan_div, set_fan_div, 0);
 static SENSOR_DEVICE_ATTR(fan2_div, S_IRUGO | S_IWUSR,
                get_fan_div, set_fan_div, 1);
-static DEVICE_ATTR(fan1_off, S_IRUGO | S_IWUSR,
-               get_fan_off, set_fan_off);
+static DEVICE_ATTR_RW(fan1_off);
 
-#define TEMP_FROM_REG(val) (((val) - 130) * 1000)
-#define TEMP_TO_REG(val) clamp_val(((((val) < 0 ? \
-                       (val) - 500 : (val) + 500) / 1000) + 130), 0, 255)
+#define TEMP_FROM_REG(val)     (((val) - 130) * 1000)
+#define TEMP_CLAMP(val)                clamp_val(val, -130000, 125000)
+#define TEMP_TO_REG(val)       (DIV_ROUND_CLOSEST(TEMP_CLAMP(val), 1000) + 130)
 
 static ssize_t get_temp_input(struct device *dev, struct device_attribute *attr,
                              char *buf)
@@ -596,29 +603,30 @@ static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR,
 static SENSOR_DEVICE_ATTR(temp2_max_hyst, S_IRUGO | S_IWUSR,
                get_temp_max_hyst, set_temp_max_hyst, 1);
 
-static ssize_t get_alarms(struct device *dev, struct device_attribute *attr,
-                         char *buf)
+static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
+                          char *buf)
 {
        struct gl520_data *data = gl520_update_device(dev);
        return sprintf(buf, "%d\n", data->alarms);
 }
 
-static ssize_t get_beep_enable(struct device *dev, struct device_attribute
-                              *attr, char *buf)
+static ssize_t beep_enable_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
 {
        struct gl520_data *data = gl520_update_device(dev);
        return sprintf(buf, "%d\n", data->beep_enable);
 }
 
-static ssize_t get_beep_mask(struct device *dev, struct device_attribute *attr,
-                            char *buf)
+static ssize_t beep_mask_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
 {
        struct gl520_data *data = gl520_update_device(dev);
        return sprintf(buf, "%d\n", data->beep_mask);
 }
 
-static ssize_t set_beep_enable(struct device *dev, struct device_attribute
-                              *attr, const char *buf, size_t count)
+static ssize_t beep_enable_store(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t count)
 {
        struct gl520_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
@@ -641,8 +649,9 @@ static ssize_t set_beep_enable(struct device *dev, struct device_attribute
        return count;
 }
 
-static ssize_t set_beep_mask(struct device *dev, struct device_attribute *attr,
-                            const char *buf, size_t count)
+static ssize_t beep_mask_store(struct device *dev,
+                              struct device_attribute *attr, const char *buf,
+                              size_t count)
 {
        struct gl520_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
@@ -661,11 +670,9 @@ static ssize_t set_beep_mask(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static DEVICE_ATTR(alarms, S_IRUGO, get_alarms, NULL);
-static DEVICE_ATTR(beep_enable, S_IRUGO | S_IWUSR,
-               get_beep_enable, set_beep_enable);
-static DEVICE_ATTR(beep_mask, S_IRUGO | S_IWUSR,
-               get_beep_mask, set_beep_mask);
+static DEVICE_ATTR_RO(alarms);
+static DEVICE_ATTR_RW(beep_enable);
+static DEVICE_ATTR_RW(beep_mask);
 
 static ssize_t get_alarm(struct device *dev, struct device_attribute *attr,
                         char *buf)
index 685568b1236d4a26db2d685ce36dcb9729e9f3ab..9c355b9d31c57ac23b663fe5d83460dd11130bba 100644 (file)
@@ -77,8 +77,8 @@ static irqreturn_t fan_alarm_irq_handler(int irq, void *dev_id)
        return IRQ_NONE;
 }
 
-static ssize_t show_fan_alarm(struct device *dev,
-                             struct device_attribute *attr, char *buf)
+static ssize_t fan1_alarm_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
 {
        struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
        struct gpio_fan_alarm *alarm = fan_data->alarm;
@@ -90,7 +90,7 @@ static ssize_t show_fan_alarm(struct device *dev,
        return sprintf(buf, "%d\n", value);
 }
 
-static DEVICE_ATTR(fan1_alarm, S_IRUGO, show_fan_alarm, NULL);
+static DEVICE_ATTR_RO(fan1_alarm);
 
 static int fan_alarm_init(struct gpio_fan_data *fan_data,
                          struct gpio_fan_alarm *alarm)
@@ -188,8 +188,8 @@ static int rpm_to_speed_index(struct gpio_fan_data *fan_data, unsigned long rpm)
        return fan_data->num_speed - 1;
 }
 
-static ssize_t show_pwm(struct device *dev,
-                       struct device_attribute *attr, char *buf)
+static ssize_t pwm1_show(struct device *dev, struct device_attribute *attr,
+                        char *buf)
 {
        struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
        u8 pwm = fan_data->speed_index * 255 / (fan_data->num_speed - 1);
@@ -197,8 +197,8 @@ static ssize_t show_pwm(struct device *dev,
        return sprintf(buf, "%d\n", pwm);
 }
 
-static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
-                      const char *buf, size_t count)
+static ssize_t pwm1_store(struct device *dev, struct device_attribute *attr,
+                         const char *buf, size_t count)
 {
        struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
        unsigned long pwm;
@@ -224,16 +224,17 @@ exit_unlock:
        return ret;
 }
 
-static ssize_t show_pwm_enable(struct device *dev,
-                              struct device_attribute *attr, char *buf)
+static ssize_t pwm1_enable_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
 {
        struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
 
        return sprintf(buf, "%d\n", fan_data->pwm_enable);
 }
 
-static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *attr,
-                             const char *buf, size_t count)
+static ssize_t pwm1_enable_store(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t count)
 {
        struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
        unsigned long val;
@@ -257,22 +258,22 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static ssize_t show_pwm_mode(struct device *dev,
-                            struct device_attribute *attr, char *buf)
+static ssize_t pwm1_mode_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
 {
        return sprintf(buf, "0\n");
 }
 
-static ssize_t show_rpm_min(struct device *dev,
-                           struct device_attribute *attr, char *buf)
+static ssize_t fan1_min_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
 {
        struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
 
        return sprintf(buf, "%d\n", fan_data->speed[0].rpm);
 }
 
-static ssize_t show_rpm_max(struct device *dev,
-                           struct device_attribute *attr, char *buf)
+static ssize_t fan1_max_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
 {
        struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
 
@@ -280,8 +281,8 @@ static ssize_t show_rpm_max(struct device *dev,
                       fan_data->speed[fan_data->num_speed - 1].rpm);
 }
 
-static ssize_t show_rpm(struct device *dev,
-                       struct device_attribute *attr, char *buf)
+static ssize_t fan1_input_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
 {
        struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
 
@@ -313,14 +314,13 @@ exit_unlock:
        return ret;
 }
 
-static DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_pwm, set_pwm);
-static DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
-                  show_pwm_enable, set_pwm_enable);
-static DEVICE_ATTR(pwm1_mode, S_IRUGO, show_pwm_mode, NULL);
-static DEVICE_ATTR(fan1_min, S_IRUGO, show_rpm_min, NULL);
-static DEVICE_ATTR(fan1_max, S_IRUGO, show_rpm_max, NULL);
-static DEVICE_ATTR(fan1_input, S_IRUGO, show_rpm, NULL);
-static DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, show_rpm, set_rpm);
+static DEVICE_ATTR_RW(pwm1);
+static DEVICE_ATTR_RW(pwm1_enable);
+static DEVICE_ATTR_RO(pwm1_mode);
+static DEVICE_ATTR_RO(fan1_min);
+static DEVICE_ATTR_RO(fan1_max);
+static DEVICE_ATTR_RO(fan1_input);
+static DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, fan1_input_show, set_rpm);
 
 static umode_t gpio_fan_is_visible(struct kobject *kobj,
                                   struct attribute *attr, int index)
index 3932f9276c074d92b7028b32c29e36c6a5aa9738..28375d59cc36c8bebfb08c7f86a009fcd4e74678 100644 (file)
@@ -63,11 +63,11 @@ struct hwmon_thermal_data {
 };
 
 static ssize_t
-show_name(struct device *dev, struct device_attribute *attr, char *buf)
+name_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        return sprintf(buf, "%s\n", to_hwmon_device(dev)->name);
 }
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
 
 static struct attribute *hwmon_dev_attrs[] = {
        &dev_attr_name.attr,
@@ -544,9 +544,11 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
        struct device *hdev;
        int i, j, err, id;
 
-       /* Do not accept invalid characters in hwmon name attribute */
+       /* Complain about invalid characters in hwmon name attribute */
        if (name && (!strlen(name) || strpbrk(name, "-* \t\n")))
-               return ERR_PTR(-EINVAL);
+               dev_warn(dev,
+                        "hwmon: '%s' is not a valid name attribute, please fix\n",
+                        name);
 
        id = ida_simple_get(&hwmon_ida, 0, 0, GFP_KERNEL);
        if (id < 0)
@@ -606,7 +608,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
        if (err)
                goto free_hwmon;
 
-       if (chip && chip->ops->read &&
+       if (dev && chip && chip->ops->read &&
            chip->info[0]->type == hwmon_chip &&
            (chip->info[0]->config[0] & HWMON_C_REGISTER_TZ)) {
                const struct hwmon_channel_info **info = chip->info;
@@ -651,6 +653,9 @@ hwmon_device_register_with_groups(struct device *dev, const char *name,
                                  void *drvdata,
                                  const struct attribute_group **groups)
 {
+       if (!name)
+               return ERR_PTR(-EINVAL);
+
        return __hwmon_device_register(dev, name, drvdata, NULL, groups);
 }
 EXPORT_SYMBOL_GPL(hwmon_device_register_with_groups);
@@ -674,6 +679,9 @@ hwmon_device_register_with_info(struct device *dev, const char *name,
                                const struct hwmon_chip_info *chip,
                                const struct attribute_group **extra_groups)
 {
+       if (!name)
+               return ERR_PTR(-EINVAL);
+
        if (chip && (!chip->ops || !chip->ops->is_visible || !chip->info))
                return ERR_PTR(-EINVAL);
 
@@ -695,7 +703,7 @@ struct device *hwmon_device_register(struct device *dev)
        dev_warn(dev,
                 "hwmon_device_register() is deprecated. Please convert the driver to use hwmon_device_register_with_info().\n");
 
-       return hwmon_device_register_with_groups(dev, NULL, NULL, NULL);
+       return __hwmon_device_register(dev, NULL, NULL, NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(hwmon_device_register);
 
index 3e3ccbf18b4efb0dfa482140cd77d72ddbff5898..400e0675a90bdd2e0e04cdcdd1a5ce3028526375 100644 (file)
@@ -43,8 +43,8 @@
  */
 
 /* Sensor resolution : 0.5 degree C */
-static ssize_t show_temp(struct device *dev,
-                        struct device_attribute *devattr, char *buf)
+static ssize_t temp1_input_show(struct device *dev,
+                               struct device_attribute *devattr, char *buf)
 {
        struct pci_dev *pdev = to_pci_dev(dev->parent);
        long temp;
@@ -83,7 +83,7 @@ static ssize_t show_alarm(struct device *dev,
        return sprintf(buf, "%u\n", (unsigned int)ctsts & (1 << nr));
 }
 
-static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL);
+static DEVICE_ATTR_RO(temp1_input);
 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_thresh, NULL, 0xE2);
 static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO, show_thresh, NULL, 0xEC);
 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_thresh, NULL, 0xEE);
index 6b3d1972cef74d1f55af1846d6a6c91ed26acc61..a5a9f457b7f7a8512f7a1e50a177fedaf5fa91a6 100644 (file)
@@ -114,14 +114,14 @@ struct i5k_amb_data {
        unsigned int num_attrs;
 };
 
-static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
+static ssize_t name_show(struct device *dev, struct device_attribute *devattr,
                         char *buf)
 {
        return sprintf(buf, "%s\n", DRVNAME);
 }
 
 
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
 
 static struct platform_device *amb_pdev;
 
index ad82cb28d87ab9f805a444983030af3a9a18b1da..efb01c247e2d90680f02980af76148799f3823fc 100644 (file)
@@ -12,6 +12,7 @@
  *
  *  Supports: IT8603E  Super I/O chip w/LPC interface
  *            IT8620E  Super I/O chip w/LPC interface
+ *            IT8622E  Super I/O chip w/LPC interface
  *            IT8623E  Super I/O chip w/LPC interface
  *            IT8628E  Super I/O chip w/LPC interface
  *            IT8705F  Super I/O chip w/LPC interface
@@ -31,6 +32,7 @@
  *            IT8783E/F Super I/O chip w/LPC interface
  *            IT8786E  Super I/O chip w/LPC interface
  *            IT8790E  Super I/O chip w/LPC interface
+ *            IT8792E  Super I/O chip w/LPC interface
  *            Sis950   A clone of the IT8705F
  *
  *  Copyright (C) 2001 Chris Gauthron
@@ -69,8 +71,8 @@
 #define DRVNAME "it87"
 
 enum chips { it87, it8712, it8716, it8718, it8720, it8721, it8728, it8732,
-            it8771, it8772, it8781, it8782, it8783, it8786, it8790, it8603,
-            it8620, it8628 };
+            it8771, it8772, it8781, it8782, it8783, it8786, it8790,
+            it8792, it8603, it8620, it8622, it8628 };
 
 static unsigned short force_id;
 module_param(force_id, ushort, 0);
@@ -151,6 +153,7 @@ static inline void superio_exit(int ioreg)
 #define IT8726F_DEVID 0x8726
 #define IT8728F_DEVID 0x8728
 #define IT8732F_DEVID 0x8732
+#define IT8792E_DEVID 0x8733
 #define IT8771E_DEVID 0x8771
 #define IT8772E_DEVID 0x8772
 #define IT8781F_DEVID 0x8781
@@ -160,6 +163,7 @@ static inline void superio_exit(int ioreg)
 #define IT8790E_DEVID 0x8790
 #define IT8603E_DEVID 0x8603
 #define IT8620E_DEVID 0x8620
+#define IT8622E_DEVID 0x8622
 #define IT8623E_DEVID 0x8623
 #define IT8628E_DEVID 0x8628
 #define IT87_ACT_REG  0x30
@@ -293,9 +297,11 @@ struct it87_devices {
 #define FEAT_SIX_FANS          BIT(11) /* Supports six fans */
 #define FEAT_10_9MV_ADC                BIT(12)
 #define FEAT_AVCC3             BIT(13) /* Chip supports in9/AVCC3 */
-#define FEAT_SIX_PWM           BIT(14) /* Chip supports 6 pwm chn */
-#define FEAT_PWM_FREQ2         BIT(15) /* Separate pwm freq 2 */
-#define FEAT_SIX_TEMP          BIT(16) /* Up to 6 temp sensors */
+#define FEAT_FIVE_PWM          BIT(14) /* Chip supports 5 pwm chn */
+#define FEAT_SIX_PWM           BIT(15) /* Chip supports 6 pwm chn */
+#define FEAT_PWM_FREQ2         BIT(16) /* Separate pwm freq 2 */
+#define FEAT_SIX_TEMP          BIT(17) /* Up to 6 temp sensors */
+#define FEAT_VIN3_5V           BIT(18) /* VIN3 connected to +5V */
 
 static const struct it87_devices it87_devices[] = {
        [it87] = {
@@ -419,6 +425,15 @@ static const struct it87_devices it87_devices[] = {
                  | FEAT_PWM_FREQ2,
                .peci_mask = 0x07,
        },
+       [it8792] = {
+               .name = "it8792",
+               .suffix = "E",
+               .features = FEAT_NEWER_AUTOPWM | FEAT_16BIT_FANS
+                 | FEAT_TEMP_OFFSET | FEAT_TEMP_OLD_PECI | FEAT_TEMP_PECI
+                 | FEAT_10_9MV_ADC | FEAT_IN7_INTERNAL,
+               .peci_mask = 0x07,
+               .old_peci_mask = 0x02,  /* Actually reports PCH */
+       },
        [it8603] = {
                .name = "it8603",
                .suffix = "E",
@@ -433,7 +448,16 @@ static const struct it87_devices it87_devices[] = {
                .features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
                  | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_SIX_FANS
                  | FEAT_IN7_INTERNAL | FEAT_SIX_PWM | FEAT_PWM_FREQ2
-                 | FEAT_SIX_TEMP,
+                 | FEAT_SIX_TEMP | FEAT_VIN3_5V,
+               .peci_mask = 0x07,
+       },
+       [it8622] = {
+               .name = "it8622",
+               .suffix = "E",
+               .features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
+                 | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_FIVE_FANS
+                 | FEAT_FIVE_PWM | FEAT_IN7_INTERNAL | FEAT_PWM_FREQ2
+                 | FEAT_AVCC3 | FEAT_VIN3_5V,
                .peci_mask = 0x07,
        },
        [it8628] = {
@@ -442,7 +466,7 @@ static const struct it87_devices it87_devices[] = {
                .features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
                  | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_SIX_FANS
                  | FEAT_IN7_INTERNAL | FEAT_SIX_PWM | FEAT_PWM_FREQ2
-                 | FEAT_SIX_TEMP,
+                 | FEAT_SIX_TEMP | FEAT_VIN3_5V,
                .peci_mask = 0x07,
        },
 };
@@ -465,9 +489,12 @@ static const struct it87_devices it87_devices[] = {
 #define has_in7_internal(data) ((data)->features & FEAT_IN7_INTERNAL)
 #define has_six_fans(data)     ((data)->features & FEAT_SIX_FANS)
 #define has_avcc3(data)                ((data)->features & FEAT_AVCC3)
+#define has_five_pwm(data)     ((data)->features & (FEAT_FIVE_PWM \
+                                                    | FEAT_SIX_PWM))
 #define has_six_pwm(data)      ((data)->features & FEAT_SIX_PWM)
 #define has_pwm_freq2(data)    ((data)->features & FEAT_PWM_FREQ2)
 #define has_six_temp(data)     ((data)->features & FEAT_SIX_TEMP)
+#define has_vin3_5v(data)      ((data)->features & FEAT_VIN3_5V)
 
 struct it87_sio_data {
        enum chips type;
@@ -1300,25 +1327,35 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *attr,
                        it87_write_value(data, IT87_REG_FAN_MAIN_CTRL,
                                         data->fan_main_ctrl);
                } else {
+                       u8 ctrl;
+
                        /* No on/off mode, set maximum pwm value */
                        data->pwm_duty[nr] = pwm_to_reg(data, 0xff);
                        it87_write_value(data, IT87_REG_PWM_DUTY[nr],
                                         data->pwm_duty[nr]);
                        /* and set manual mode */
-                       data->pwm_ctrl[nr] = has_newer_autopwm(data) ?
-                                            data->pwm_temp_map[nr] :
-                                            data->pwm_duty[nr];
-                       it87_write_value(data, IT87_REG_PWM[nr],
-                                        data->pwm_ctrl[nr]);
+                       if (has_newer_autopwm(data)) {
+                               ctrl = (data->pwm_ctrl[nr] & 0x7c) |
+                                       data->pwm_temp_map[nr];
+                       } else {
+                               ctrl = data->pwm_duty[nr];
+                       }
+                       data->pwm_ctrl[nr] = ctrl;
+                       it87_write_value(data, IT87_REG_PWM[nr], ctrl);
                }
        } else {
-               if (val == 1)                           /* Manual mode */
-                       data->pwm_ctrl[nr] = has_newer_autopwm(data) ?
-                                            data->pwm_temp_map[nr] :
-                                            data->pwm_duty[nr];
-               else                                    /* Automatic mode */
-                       data->pwm_ctrl[nr] = 0x80 | data->pwm_temp_map[nr];
-               it87_write_value(data, IT87_REG_PWM[nr], data->pwm_ctrl[nr]);
+               u8 ctrl;
+
+               if (has_newer_autopwm(data)) {
+                       ctrl = (data->pwm_ctrl[nr] & 0x7c) |
+                               data->pwm_temp_map[nr];
+                       if (val != 1)
+                               ctrl |= 0x80;
+               } else {
+                       ctrl = (val == 1 ? data->pwm_duty[nr] : 0x80);
+               }
+               data->pwm_ctrl[nr] = ctrl;
+               it87_write_value(data, IT87_REG_PWM[nr], ctrl);
 
                if (data->type != it8603 && nr < 3) {
                        /* set SmartGuardian mode */
@@ -1344,6 +1381,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
                return -EINVAL;
 
        mutex_lock(&data->update_lock);
+       it87_update_pwm_ctrl(data, nr);
        if (has_newer_autopwm(data)) {
                /*
                 * If we are in automatic mode, the PWM duty cycle register
@@ -1456,13 +1494,15 @@ static ssize_t set_pwm_temp_map(struct device *dev,
        }
 
        mutex_lock(&data->update_lock);
+       it87_update_pwm_ctrl(data, nr);
        data->pwm_temp_map[nr] = reg;
        /*
         * If we are in automatic mode, write the temp mapping immediately;
         * otherwise, just store it for later use.
         */
        if (data->pwm_ctrl[nr] & 0x80) {
-               data->pwm_ctrl[nr] = 0x80 | data->pwm_temp_map[nr];
+               data->pwm_ctrl[nr] = (data->pwm_ctrl[nr] & 0xfc) |
+                                               data->pwm_temp_map[nr];
                it87_write_value(data, IT87_REG_PWM[nr], data->pwm_ctrl[nr]);
        }
        mutex_unlock(&data->update_lock);
@@ -1762,14 +1802,14 @@ static SENSOR_DEVICE_ATTR(pwm6_auto_slope, S_IRUGO | S_IWUSR,
                          show_auto_pwm_slope, set_auto_pwm_slope, 5);
 
 /* Alarms */
-static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
+static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
 {
        struct it87_data *data = it87_update_device(dev);
 
        return sprintf(buf, "%u\n", data->alarms);
 }
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
 
 static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
                          char *buf)
@@ -1877,16 +1917,16 @@ static SENSOR_DEVICE_ATTR(temp1_beep, S_IRUGO | S_IWUSR,
 static SENSOR_DEVICE_ATTR(temp2_beep, S_IRUGO, show_beep, NULL, 2);
 static SENSOR_DEVICE_ATTR(temp3_beep, S_IRUGO, show_beep, NULL, 2);
 
-static ssize_t show_vrm_reg(struct device *dev, struct device_attribute *attr,
-                           char *buf)
+static ssize_t vrm_show(struct device *dev, struct device_attribute *attr,
+                       char *buf)
 {
        struct it87_data *data = dev_get_drvdata(dev);
 
        return sprintf(buf, "%u\n", data->vrm);
 }
 
-static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr,
-                            const char *buf, size_t count)
+static ssize_t vrm_store(struct device *dev, struct device_attribute *attr,
+                        const char *buf, size_t count)
 {
        struct it87_data *data = dev_get_drvdata(dev);
        unsigned long val;
@@ -1898,16 +1938,16 @@ static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr,
 
        return count;
 }
-static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm_reg, store_vrm_reg);
+static DEVICE_ATTR_RW(vrm);
 
-static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr,
-                           char *buf)
+static ssize_t cpu0_vid_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
 {
        struct it87_data *data = it87_update_device(dev);
 
        return sprintf(buf, "%ld\n", (long)vid_from_reg(data->vid, data->vrm));
 }
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid_reg, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
 
 static ssize_t show_label(struct device *dev, struct device_attribute *attr,
                          char *buf)
@@ -1916,17 +1956,21 @@ static ssize_t show_label(struct device *dev, struct device_attribute *attr,
                "+5V",
                "5VSB",
                "Vbat",
+               "AVCC",
        };
        static const char * const labels_it8721[] = {
                "+3.3V",
                "3VSB",
                "Vbat",
+               "+3.3V",
        };
        struct it87_data *data = dev_get_drvdata(dev);
        int nr = to_sensor_dev_attr(attr)->index;
        const char *label;
 
-       if (has_12mv_adc(data) || has_10_9mv_adc(data))
+       if (has_vin3_5v(data) && nr == 0)
+               label = labels[0];
+       else if (has_12mv_adc(data) || has_10_9mv_adc(data))
                label = labels_it8721[nr];
        else
                label = labels[nr];
@@ -1937,7 +1981,7 @@ static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_label, NULL, 0);
 static SENSOR_DEVICE_ATTR(in7_label, S_IRUGO, show_label, NULL, 1);
 static SENSOR_DEVICE_ATTR(in8_label, S_IRUGO, show_label, NULL, 2);
 /* AVCC3 */
-static SENSOR_DEVICE_ATTR(in9_label, S_IRUGO, show_label, NULL, 0);
+static SENSOR_DEVICE_ATTR(in9_label, S_IRUGO, show_label, NULL, 3);
 
 static umode_t it87_in_is_visible(struct kobject *kobj,
                                  struct attribute *attr, int index)
@@ -2386,6 +2430,9 @@ static int __init it87_find(int sioaddr, unsigned short *address,
        case IT8732F_DEVID:
                sio_data->type = it8732;
                break;
+       case IT8792E_DEVID:
+               sio_data->type = it8792;
+               break;
        case IT8771E_DEVID:
                sio_data->type = it8771;
                break;
@@ -2414,6 +2461,9 @@ static int __init it87_find(int sioaddr, unsigned short *address,
        case IT8620E_DEVID:
                sio_data->type = it8620;
                break;
+       case IT8622E_DEVID:
+               sio_data->type = it8622;
+               break;
        case IT8628E_DEVID:
                sio_data->type = it8628;
                break;
@@ -2457,8 +2507,10 @@ static int __init it87_find(int sioaddr, unsigned short *address,
        else
                sio_data->skip_in |= BIT(9);
 
-       if (!has_six_pwm(config))
+       if (!has_five_pwm(config))
                sio_data->skip_pwm |= BIT(3) | BIT(4) | BIT(5);
+       else if (!has_six_pwm(config))
+               sio_data->skip_pwm |= BIT(5);
 
        if (!has_vid(config))
                sio_data->skip_vid = 1;
@@ -2587,7 +2639,7 @@ static int __init it87_find(int sioaddr, unsigned short *address,
 
                /* Check for pwm4 */
                reg = superio_inb(sioaddr, IT87_SIO_GPIO4_REG);
-               if (!(reg & BIT(2)))
+               if (reg & BIT(2))
                        sio_data->skip_pwm |= BIT(3);
 
                /* Check for pwm2, fan2 */
@@ -2602,6 +2654,50 @@ static int __init it87_find(int sioaddr, unsigned short *address,
                        sio_data->skip_fan |= BIT(5);
                }
 
+               /* Check if AVCC is on VIN3 */
+               reg = superio_inb(sioaddr, IT87_SIO_PINX2_REG);
+               if (reg & BIT(0))
+                       sio_data->internal |= BIT(0);
+               else
+                       sio_data->skip_in |= BIT(9);
+
+               sio_data->beep_pin = superio_inb(sioaddr,
+                                                IT87_SIO_BEEP_PIN_REG) & 0x3f;
+       } else if (sio_data->type == it8622) {
+               int reg;
+
+               superio_select(sioaddr, GPIO);
+
+               /* Check for pwm4, fan4 */
+               reg = superio_inb(sioaddr, IT87_SIO_GPIO1_REG);
+               if (reg & BIT(6))
+                       sio_data->skip_fan |= BIT(3);
+               if (reg & BIT(5))
+                       sio_data->skip_pwm |= BIT(3);
+
+               /* Check for pwm3, fan3, pwm5, fan5 */
+               reg = superio_inb(sioaddr, IT87_SIO_GPIO3_REG);
+               if (reg & BIT(6))
+                       sio_data->skip_pwm |= BIT(2);
+               if (reg & BIT(7))
+                       sio_data->skip_fan |= BIT(2);
+               if (reg & BIT(3))
+                       sio_data->skip_pwm |= BIT(4);
+               if (reg & BIT(1))
+                       sio_data->skip_fan |= BIT(4);
+
+               /* Check for pwm2, fan2 */
+               reg = superio_inb(sioaddr, IT87_SIO_GPIO5_REG);
+               if (reg & BIT(1))
+                       sio_data->skip_pwm |= BIT(1);
+               if (reg & BIT(2))
+                       sio_data->skip_fan |= BIT(1);
+
+               /* Check for AVCC */
+               reg = superio_inb(sioaddr, IT87_SIO_PINX2_REG);
+               if (!(reg & BIT(0)))
+                       sio_data->skip_in |= BIT(9);
+
                sio_data->beep_pin = superio_inb(sioaddr,
                                                 IT87_SIO_BEEP_PIN_REG) & 0x3f;
        } else {
index 0621ee1b3c98f1ebb9a86fb316fb8d0690a6e9e1..2d40a2e771d75db582e4eed9d497b47c5a4e9ab2 100644 (file)
@@ -44,8 +44,8 @@ static irqreturn_t jz4740_hwmon_irq(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
-       struct device_attribute *dev_attr, char *buf)
+static ssize_t in0_input_show(struct device *dev,
+                             struct device_attribute *dev_attr, char *buf)
 {
        struct jz4740_hwmon *hwmon = dev_get_drvdata(dev);
        struct platform_device *pdev = hwmon->pdev;
@@ -79,7 +79,7 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
        return ret;
 }
 
-static DEVICE_ATTR(in0_input, S_IRUGO, jz4740_hwmon_read_adcin, NULL);
+static DEVICE_ATTR_RO(in0_input);
 
 static struct attribute *jz4740_attrs[] = {
        &dev_attr_in0_input.attr,
index 9cdfde6515ad9b12fdea45fb2a1be71b2a8d6838..ce3b91f22e30afb474d81f981eca7f326c076e25 100644 (file)
@@ -72,8 +72,8 @@ static void amd_nb_smu_index_read(struct pci_dev *pdev, unsigned int devfn,
        mutex_unlock(&nb_smu_ind_mutex);
 }
 
-static ssize_t show_temp(struct device *dev,
-                        struct device_attribute *attr, char *buf)
+static ssize_t temp1_input_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
 {
        u32 regval;
        struct pci_dev *pdev = dev_get_drvdata(dev);
@@ -88,8 +88,8 @@ static ssize_t show_temp(struct device *dev,
        return sprintf(buf, "%u\n", (regval >> 21) * 125);
 }
 
-static ssize_t show_temp_max(struct device *dev,
-                            struct device_attribute *attr, char *buf)
+static ssize_t temp1_max_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
 {
        return sprintf(buf, "%d\n", 70 * 1000);
 }
@@ -110,8 +110,8 @@ static ssize_t show_temp_crit(struct device *dev,
        return sprintf(buf, "%d\n", value);
 }
 
-static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL);
-static DEVICE_ATTR(temp1_max, S_IRUGO, show_temp_max, NULL);
+static DEVICE_ATTR_RO(temp1_input);
+static DEVICE_ATTR_RO(temp1_max);
 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp_crit, NULL, 0);
 static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, show_temp_crit, NULL, 1);
 
index 734d55d48cc88fcf1cb3ededba02cb6cb80961ae..5a632bcf869bbf78d3835474a066eaa76b84688d 100644 (file)
@@ -100,7 +100,7 @@ static struct k8temp_data *k8temp_update_device(struct device *dev)
  * Sysfs stuff
  */
 
-static ssize_t show_name(struct device *dev, struct device_attribute
+static ssize_t name_show(struct device *dev, struct device_attribute
                         *devattr, char *buf)
 {
        struct k8temp_data *data = dev_get_drvdata(dev);
@@ -133,7 +133,7 @@ static SENSOR_DEVICE_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 0);
 static SENSOR_DEVICE_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, 0, 1);
 static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 1, 0);
 static SENSOR_DEVICE_ATTR_2(temp4_input, S_IRUGO, show_temp, NULL, 1, 1);
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
 
 static const struct pci_device_id k8temp_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
index 33bfdb4441383608d5c6de8bb42ba09722e68e64..2e1948699114002b6f8b2ea7da30e324e43392b4 100644 (file)
@@ -417,16 +417,16 @@ static ssize_t set_pwm1(struct device *dev, struct device_attribute *devattr,
        return count;
 }
 
-static ssize_t show_pwm1_enable(struct device *dev,
+static ssize_t pwm1_enable_show(struct device *dev,
                                struct device_attribute *dummy, char *buf)
 {
        struct lm63_data *data = lm63_update_device(dev);
        return sprintf(buf, "%d\n", data->config_fan & 0x20 ? 1 : 2);
 }
 
-static ssize_t set_pwm1_enable(struct device *dev,
-                              struct device_attribute *dummy,
-                              const char *buf, size_t count)
+static ssize_t pwm1_enable_store(struct device *dev,
+                                struct device_attribute *dummy,
+                                const char *buf, size_t count)
 {
        struct lm63_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
@@ -600,7 +600,7 @@ static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
  * Hysteresis register holds a relative value, while we want to present
  * an absolute to user-space
  */
-static ssize_t show_temp2_crit_hyst(struct device *dev,
+static ssize_t temp2_crit_hyst_show(struct device *dev,
                                    struct device_attribute *dummy, char *buf)
 {
        struct lm63_data *data = lm63_update_device(dev);
@@ -624,9 +624,9 @@ static ssize_t show_lut_temp_hyst(struct device *dev,
  * And now the other way around, user-space provides an absolute
  * hysteresis value and we have to store a relative one
  */
-static ssize_t set_temp2_crit_hyst(struct device *dev,
-                                  struct device_attribute *dummy,
-                                  const char *buf, size_t count)
+static ssize_t temp2_crit_hyst_store(struct device *dev,
+                                    struct device_attribute *dummy,
+                                    const char *buf, size_t count)
 {
        struct lm63_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
@@ -670,7 +670,7 @@ static void lm63_set_convrate(struct lm63_data *data, unsigned int interval)
        data->update_interval = UPDATE_INTERVAL(data->max_convrate_hz, i);
 }
 
-static ssize_t show_update_interval(struct device *dev,
+static ssize_t update_interval_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
 {
        struct lm63_data *data = dev_get_drvdata(dev);
@@ -678,9 +678,9 @@ static ssize_t show_update_interval(struct device *dev,
        return sprintf(buf, "%u\n", data->update_interval);
 }
 
-static ssize_t set_update_interval(struct device *dev,
-                                  struct device_attribute *attr,
-                                  const char *buf, size_t count)
+static ssize_t update_interval_store(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf, size_t count)
 {
        struct lm63_data *data = dev_get_drvdata(dev);
        unsigned long val;
@@ -697,16 +697,17 @@ static ssize_t set_update_interval(struct device *dev,
        return count;
 }
 
-static ssize_t show_type(struct device *dev, struct device_attribute *attr,
-                        char *buf)
+static ssize_t temp2_type_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
 {
        struct lm63_data *data = dev_get_drvdata(dev);
 
        return sprintf(buf, data->trutherm ? "1\n" : "2\n");
 }
 
-static ssize_t set_type(struct device *dev, struct device_attribute *attr,
-                       const char *buf, size_t count)
+static ssize_t temp2_type_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t count)
 {
        struct lm63_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
@@ -731,7 +732,7 @@ static ssize_t set_type(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static ssize_t show_alarms(struct device *dev, struct device_attribute *dummy,
+static ssize_t alarms_show(struct device *dev, struct device_attribute *dummy,
                           char *buf)
 {
        struct lm63_data *data = lm63_update_device(dev);
@@ -753,8 +754,7 @@ static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan,
        set_fan, 1);
 
 static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm1, set_pwm1, 0);
-static DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
-       show_pwm1_enable, set_pwm1_enable);
+static DEVICE_ATTR_RW(pwm1_enable);
 static SENSOR_DEVICE_ATTR(pwm1_auto_point1_pwm, S_IWUSR | S_IRUGO,
        show_pwm1, set_pwm1, 1);
 static SENSOR_DEVICE_ATTR(pwm1_auto_point1_temp, S_IWUSR | S_IRUGO,
@@ -841,10 +841,9 @@ static SENSOR_DEVICE_ATTR(temp2_offset, S_IWUSR | S_IRUGO, show_temp11,
        set_temp11, 3);
 static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_remote_temp8,
        set_temp8, 2);
-static DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp2_crit_hyst,
-       set_temp2_crit_hyst);
+static DEVICE_ATTR_RW(temp2_crit_hyst);
 
-static DEVICE_ATTR(temp2_type, S_IWUSR | S_IRUGO, show_type, set_type);
+static DEVICE_ATTR_RW(temp2_type);
 
 /* Individual alarm files */
 static SENSOR_DEVICE_ATTR(fan1_min_alarm, S_IRUGO, show_alarm, NULL, 0);
@@ -854,10 +853,9 @@ static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_alarm, NULL, 3);
 static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 4);
 static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
 /* Raw alarm file for compatibility */
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
 
-static DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR, show_update_interval,
-                  set_update_interval);
+static DEVICE_ATTR_RW(update_interval);
 
 static struct attribute *lm63_attributes[] = {
        &sensor_dev_attr_pwm1.dev_attr.attr,
index 583f883a4cfe61eb2bcaa9530a4c146eb8d23045..543556dc563b870f5dc0e11a8418052d601e07d7 100644 (file)
@@ -46,6 +46,7 @@
 #define LM70_CHIP_TMP121       1       /* TI TMP121/TMP123 */
 #define LM70_CHIP_LM71         2       /* NS LM71 */
 #define LM70_CHIP_LM74         3       /* NS LM74 */
+#define LM70_CHIP_TMP122       4       /* TI TMP122/TMP124 */
 
 struct lm70 {
        struct spi_device *spi;
@@ -54,8 +55,8 @@ struct lm70 {
 };
 
 /* sysfs hook function */
-static ssize_t lm70_sense_temp(struct device *dev,
-               struct device_attribute *attr, char *buf)
+static ssize_t temp1_input_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
 {
        struct lm70 *p_lm70 = dev_get_drvdata(dev);
        struct spi_device *spi = p_lm70->spi;
@@ -72,7 +73,8 @@ static ssize_t lm70_sense_temp(struct device *dev,
         */
        status = spi_write_then_read(spi, NULL, 0, &rxbuf[0], 2);
        if (status < 0) {
-               pr_warn("spi_write_then_read failed with status %d\n", status);
+               dev_warn(dev, "spi_write_then_read failed with status %d\n",
+                        status);
                goto out;
        }
        raw = (rxbuf[0] << 8) + rxbuf[1];
@@ -91,7 +93,7 @@ static ssize_t lm70_sense_temp(struct device *dev,
         * Celsius.
         * So it's equivalent to multiplying by 0.25 * 1000 = 250.
         *
-        * LM74 and TMP121/TMP123:
+        * LM74 and TMP121/TMP122/TMP123/TMP124:
         * 13 bits of 2's complement data, discard LSB 3 bits,
         * resolution 0.0625 degrees celsius.
         *
@@ -105,6 +107,7 @@ static ssize_t lm70_sense_temp(struct device *dev,
                break;
 
        case LM70_CHIP_TMP121:
+       case LM70_CHIP_TMP122:
        case LM70_CHIP_LM74:
                val = ((int)raw / 8) * 625 / 10;
                break;
@@ -120,7 +123,7 @@ out:
        return status;
 }
 
-static DEVICE_ATTR(temp1_input, S_IRUGO, lm70_sense_temp, NULL);
+static DEVICE_ATTR_RO(temp1_input);
 
 static struct attribute *lm70_attrs[] = {
        &dev_attr_temp1_input.attr,
@@ -141,6 +144,10 @@ static const struct of_device_id lm70_of_ids[] = {
                .compatible = "ti,tmp121",
                .data = (void *) LM70_CHIP_TMP121,
        },
+       {
+               .compatible = "ti,tmp122",
+               .data = (void *) LM70_CHIP_TMP122,
+       },
        {
                .compatible = "ti,lm71",
                .data = (void *) LM70_CHIP_LM71,
@@ -190,6 +197,7 @@ static int lm70_probe(struct spi_device *spi)
 static const struct spi_device_id lm70_ids[] = {
        { "lm70",   LM70_CHIP_LM70 },
        { "tmp121", LM70_CHIP_TMP121 },
+       { "tmp122", LM70_CHIP_TMP122 },
        { "lm71",   LM70_CHIP_LM71 },
        { "lm74",   LM70_CHIP_LM74 },
        { },
index 539efe4ad991930437efd79caa004ebc5dabe360..0cb7ff613b80b9db3a40eb076ea33a9e659b58b3 100644 (file)
@@ -236,22 +236,23 @@ show_in_offset(5);
 show_in_offset(6);
 
 /* Temperature */
-static ssize_t show_temp(struct device *dev, struct device_attribute *da,
-                        char *buf)
+static ssize_t temp1_input_show(struct device *dev,
+                               struct device_attribute *da, char *buf)
 {
        struct lm78_data *data = lm78_update_device(dev);
        return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp));
 }
 
-static ssize_t show_temp_over(struct device *dev, struct device_attribute *da,
+static ssize_t temp1_max_show(struct device *dev, struct device_attribute *da,
                              char *buf)
 {
        struct lm78_data *data = lm78_update_device(dev);
        return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_over));
 }
 
-static ssize_t set_temp_over(struct device *dev, struct device_attribute *da,
-                            const char *buf, size_t count)
+static ssize_t temp1_max_store(struct device *dev,
+                              struct device_attribute *da, const char *buf,
+                              size_t count)
 {
        struct lm78_data *data = dev_get_drvdata(dev);
        long val;
@@ -268,15 +269,16 @@ static ssize_t set_temp_over(struct device *dev, struct device_attribute *da,
        return count;
 }
 
-static ssize_t show_temp_hyst(struct device *dev, struct device_attribute *da,
-                             char *buf)
+static ssize_t temp1_max_hyst_show(struct device *dev,
+                                  struct device_attribute *da, char *buf)
 {
        struct lm78_data *data = lm78_update_device(dev);
        return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_hyst));
 }
 
-static ssize_t set_temp_hyst(struct device *dev, struct device_attribute *da,
-                            const char *buf, size_t count)
+static ssize_t temp1_max_hyst_store(struct device *dev,
+                                   struct device_attribute *da,
+                                   const char *buf, size_t count)
 {
        struct lm78_data *data = dev_get_drvdata(dev);
        long val;
@@ -293,11 +295,9 @@ static ssize_t set_temp_hyst(struct device *dev, struct device_attribute *da,
        return count;
 }
 
-static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL);
-static DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR,
-               show_temp_over, set_temp_over);
-static DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR,
-               show_temp_hyst, set_temp_hyst);
+static DEVICE_ATTR_RO(temp1_input);
+static DEVICE_ATTR_RW(temp1_max);
+static DEVICE_ATTR_RW(temp1_max_hyst);
 
 /* 3 Fans */
 static ssize_t show_fan(struct device *dev, struct device_attribute *da,
@@ -431,22 +431,22 @@ static SENSOR_DEVICE_ATTR(fan2_div, S_IRUGO | S_IWUSR,
 static SENSOR_DEVICE_ATTR(fan3_div, S_IRUGO, show_fan_div, NULL, 2);
 
 /* VID */
-static ssize_t show_vid(struct device *dev, struct device_attribute *da,
-                       char *buf)
+static ssize_t cpu0_vid_show(struct device *dev, struct device_attribute *da,
+                            char *buf)
 {
        struct lm78_data *data = lm78_update_device(dev);
        return sprintf(buf, "%d\n", vid_from_reg(data->vid, 82));
 }
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
 
 /* Alarms */
-static ssize_t show_alarms(struct device *dev, struct device_attribute *da,
+static ssize_t alarms_show(struct device *dev, struct device_attribute *da,
                           char *buf)
 {
        struct lm78_data *data = lm78_update_device(dev);
        return sprintf(buf, "%u\n", data->alarms);
 }
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
 
 static ssize_t show_alarm(struct device *dev, struct device_attribute *da,
                          char *buf)
index 4bcd9b882948c16f9a190cf061efc94a5e899ce9..08e3945a6fbfdebbfdb67cfa7fa5696814cca27b 100644 (file)
@@ -432,7 +432,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
        return count;
 }
 
-static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
+static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
 {
        struct lm80_data *data = lm80_update_device(dev);
@@ -505,7 +505,7 @@ static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp,
                set_temp, t_os_max);
 static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temp,
                set_temp, t_os_hyst);
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
 static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
 static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
 static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
index 9e4d0e1d3c4b79b1ebcaf8ab49b7688b39392f38..cbfd0bb7f1355ffb33bcc17d88874c63d492b91f 100644 (file)
@@ -188,7 +188,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
        return count;
 }
 
-static ssize_t show_alarms(struct device *dev, struct device_attribute *dummy,
+static ssize_t alarms_show(struct device *dev, struct device_attribute *dummy,
                           char *buf)
 {
        struct lm83_data *data = lm83_update_device(dev);
@@ -236,7 +236,7 @@ static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_alarm, NULL, 12);
 static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 13);
 static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 15);
 /* Raw alarm file for compatibility */
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
 
 static struct attribute *lm83_attributes[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
index 29c8136ce9c50c5c90156cdb91915e642eabea35..691469ffa24ec73a73ac73deabd13871e18c7df3 100644 (file)
@@ -604,8 +604,8 @@ show_fan_offset(4);
 
 /* vid, vrm, alarms */
 
-static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr,
-               char *buf)
+static ssize_t cpu0_vid_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
 {
        struct lm85_data *data = lm85_update_device(dev);
        int vid;
@@ -621,17 +621,17 @@ static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr,
        return sprintf(buf, "%d\n", vid);
 }
 
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid_reg, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
 
-static ssize_t show_vrm_reg(struct device *dev, struct device_attribute *attr,
-               char *buf)
+static ssize_t vrm_show(struct device *dev, struct device_attribute *attr,
+                       char *buf)
 {
        struct lm85_data *data = dev_get_drvdata(dev);
        return sprintf(buf, "%ld\n", (long) data->vrm);
 }
 
-static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr,
-               const char *buf, size_t count)
+static ssize_t vrm_store(struct device *dev, struct device_attribute *attr,
+                        const char *buf, size_t count)
 {
        struct lm85_data *data = dev_get_drvdata(dev);
        unsigned long val;
@@ -648,16 +648,16 @@ static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm_reg, store_vrm_reg);
+static DEVICE_ATTR_RW(vrm);
 
-static ssize_t show_alarms_reg(struct device *dev, struct device_attribute
-               *attr, char *buf)
+static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
+                          char *buf)
 {
        struct lm85_data *data = lm85_update_device(dev);
        return sprintf(buf, "%u\n", data->alarms);
 }
 
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms_reg, NULL);
+static DEVICE_ATTR_RO(alarms);
 
 static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
                char *buf)
index 13cca3606e060cf441022587cdfa203313bb232a..e06faf9d3f0f4f38375714e998689d2edc7725c4 100644 (file)
@@ -445,23 +445,23 @@ set_temp(1);
 set_temp(2);
 set_temp(3);
 
-static ssize_t show_temp_crit_int(struct device *dev,
-                                 struct device_attribute *attr, char *buf)
+static ssize_t temp1_crit_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
 {
        struct lm87_data *data = lm87_update_device(dev);
        return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_crit_int));
 }
 
-static ssize_t show_temp_crit_ext(struct device *dev,
-                                 struct device_attribute *attr, char *buf)
+static ssize_t temp2_crit_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
 {
        struct lm87_data *data = lm87_update_device(dev);
        return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_crit_ext));
 }
 
-static DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp_crit_int, NULL);
-static DEVICE_ATTR(temp2_crit, S_IRUGO, show_temp_crit_ext, NULL);
-static DEVICE_ATTR(temp3_crit, S_IRUGO, show_temp_crit_ext, NULL);
+static DEVICE_ATTR_RO(temp1_crit);
+static DEVICE_ATTR_RO(temp2_crit);
+static DEVICE_ATTR(temp3_crit, S_IRUGO, temp2_crit_show, NULL);
 
 static ssize_t show_fan_input(struct device *dev,
                              struct device_attribute *attr, char *buf)
@@ -586,30 +586,30 @@ static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
 set_fan(1);
 set_fan(2);
 
-static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
+static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
 {
        struct lm87_data *data = lm87_update_device(dev);
        return sprintf(buf, "%d\n", data->alarms);
 }
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
 
-static ssize_t show_vid(struct device *dev, struct device_attribute *attr,
-                       char *buf)
+static ssize_t cpu0_vid_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
 {
        struct lm87_data *data = lm87_update_device(dev);
        return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
 }
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
 
-static ssize_t show_vrm(struct device *dev, struct device_attribute *attr,
+static ssize_t vrm_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
 {
        struct lm87_data *data = dev_get_drvdata(dev);
        return sprintf(buf, "%d\n", data->vrm);
 }
-static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
-                      const char *buf, size_t count)
+static ssize_t vrm_store(struct device *dev, struct device_attribute *attr,
+                        const char *buf, size_t count)
 {
        struct lm87_data *data = dev_get_drvdata(dev);
        unsigned long val;
@@ -625,16 +625,17 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
        data->vrm = val;
        return count;
 }
-static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm, set_vrm);
+static DEVICE_ATTR_RW(vrm);
 
-static ssize_t show_aout(struct device *dev, struct device_attribute *attr,
-                        char *buf)
+static ssize_t aout_output_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
 {
        struct lm87_data *data = lm87_update_device(dev);
        return sprintf(buf, "%d\n", AOUT_FROM_REG(data->aout));
 }
-static ssize_t set_aout(struct device *dev, struct device_attribute *attr,
-                       const char *buf, size_t count)
+static ssize_t aout_output_store(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t count)
 {
        struct i2c_client *client = dev_get_drvdata(dev);
        struct lm87_data *data = i2c_get_clientdata(client);
@@ -651,7 +652,7 @@ static ssize_t set_aout(struct device *dev, struct device_attribute *attr,
        mutex_unlock(&data->update_lock);
        return count;
 }
-static DEVICE_ATTR(aout_output, S_IRUGO | S_IWUSR, show_aout, set_aout);
+static DEVICE_ATTR_RW(aout_output);
 
 static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
                          char *buf)
index 841f2428e84a3936de0a60ee42700efd70933ed2..aff5297bc2bcdc60a578e3df5a40b552796fa30a 100644 (file)
@@ -830,7 +830,7 @@ static u16 temp_to_u16_adt7461(struct lm90_data *data, long val)
 }
 
 /* pec used for ADM1032 only */
-static ssize_t show_pec(struct device *dev, struct device_attribute *dummy,
+static ssize_t pec_show(struct device *dev, struct device_attribute *dummy,
                        char *buf)
 {
        struct i2c_client *client = to_i2c_client(dev);
@@ -838,8 +838,8 @@ static ssize_t show_pec(struct device *dev, struct device_attribute *dummy,
        return sprintf(buf, "%d\n", !!(client->flags & I2C_CLIENT_PEC));
 }
 
-static ssize_t set_pec(struct device *dev, struct device_attribute *dummy,
-                      const char *buf, size_t count)
+static ssize_t pec_store(struct device *dev, struct device_attribute *dummy,
+                        const char *buf, size_t count)
 {
        struct i2c_client *client = to_i2c_client(dev);
        long val;
@@ -863,7 +863,7 @@ static ssize_t set_pec(struct device *dev, struct device_attribute *dummy,
        return count;
 }
 
-static DEVICE_ATTR(pec, S_IWUSR | S_IRUGO, show_pec, set_pec);
+static DEVICE_ATTR_RW(pec);
 
 static int lm90_get_temp11(struct lm90_data *data, int index)
 {
index cfaf70b9cba72951e670f16b8700dd4a5ca152cc..2a91974a10bbd30c36906e3591e0bb30e363aa35 100644 (file)
@@ -181,8 +181,8 @@ static ssize_t show_temp_hyst(struct device *dev,
                       - TEMP_FROM_REG(data->temp[t_hyst]));
 }
 
-static ssize_t show_temp_min_hyst(struct device *dev,
-                                 struct device_attribute *attr, char *buf)
+static ssize_t temp1_min_hyst_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
 {
        struct lm92_data *data = lm92_update_device(dev);
        return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[t_min])
@@ -213,7 +213,7 @@ static ssize_t set_temp_hyst(struct device *dev,
        return count;
 }
 
-static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
+static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
 {
        struct lm92_data *data = lm92_update_device(dev);
@@ -235,11 +235,11 @@ static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temp_hyst,
                          set_temp_hyst, t_crit);
 static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp, set_temp,
                          t_min);
-static DEVICE_ATTR(temp1_min_hyst, S_IRUGO, show_temp_min_hyst, NULL);
+static DEVICE_ATTR_RO(temp1_min_hyst);
 static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp, set_temp,
                          t_max);
 static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO, show_temp_hyst, NULL, t_max);
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
 static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 2);
 static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 0);
 static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 1);
index 90bb04858117c530bd1f39362caf818bf5b91b5a..77a0a83399b346d530051e1ebb15a1e20fe53952 100644 (file)
@@ -2156,7 +2156,7 @@ static SENSOR_DEVICE_ATTR(pwm2_auto_spinup_time, S_IWUSR | S_IRUGO,
                          show_pwm_auto_spinup_time,
                          store_pwm_auto_spinup_time, 1);
 
-static ssize_t show_pwm_auto_prochot_ramp(struct device *dev,
+static ssize_t pwm_auto_prochot_ramp_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
        struct lm93_data *data = lm93_update_device(dev);
@@ -2164,7 +2164,7 @@ static ssize_t show_pwm_auto_prochot_ramp(struct device *dev,
                       LM93_RAMP_FROM_REG(data->pwm_ramp_ctl >> 4 & 0x0f));
 }
 
-static ssize_t store_pwm_auto_prochot_ramp(struct device *dev,
+static ssize_t pwm_auto_prochot_ramp_store(struct device *dev,
                                                struct device_attribute *attr,
                                                const char *buf, size_t count)
 {
@@ -2186,11 +2186,9 @@ static ssize_t store_pwm_auto_prochot_ramp(struct device *dev,
        return count;
 }
 
-static DEVICE_ATTR(pwm_auto_prochot_ramp, S_IRUGO | S_IWUSR,
-                       show_pwm_auto_prochot_ramp,
-                       store_pwm_auto_prochot_ramp);
+static DEVICE_ATTR_RW(pwm_auto_prochot_ramp);
 
-static ssize_t show_pwm_auto_vrdhot_ramp(struct device *dev,
+static ssize_t pwm_auto_vrdhot_ramp_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
        struct lm93_data *data = lm93_update_device(dev);
@@ -2198,7 +2196,7 @@ static ssize_t show_pwm_auto_vrdhot_ramp(struct device *dev,
                       LM93_RAMP_FROM_REG(data->pwm_ramp_ctl & 0x0f));
 }
 
-static ssize_t store_pwm_auto_vrdhot_ramp(struct device *dev,
+static ssize_t pwm_auto_vrdhot_ramp_store(struct device *dev,
                                                struct device_attribute *attr,
                                                const char *buf, size_t count)
 {
@@ -2220,9 +2218,7 @@ static ssize_t store_pwm_auto_vrdhot_ramp(struct device *dev,
        return 0;
 }
 
-static DEVICE_ATTR(pwm_auto_vrdhot_ramp, S_IRUGO | S_IWUSR,
-                       show_pwm_auto_vrdhot_ramp,
-                       store_pwm_auto_vrdhot_ramp);
+static DEVICE_ATTR_RW(pwm_auto_vrdhot_ramp);
 
 static ssize_t show_vid(struct device *dev, struct device_attribute *attr,
                        char *buf)
@@ -2378,7 +2374,7 @@ static SENSOR_DEVICE_ATTR(prochot1_interval, S_IWUSR | S_IRUGO,
 static SENSOR_DEVICE_ATTR(prochot2_interval, S_IWUSR | S_IRUGO,
                          show_prochot_interval, store_prochot_interval, 1);
 
-static ssize_t show_prochot_override_duty_cycle(struct device *dev,
+static ssize_t prochot_override_duty_cycle_show(struct device *dev,
                                                struct device_attribute *attr,
                                                char *buf)
 {
@@ -2386,7 +2382,7 @@ static ssize_t show_prochot_override_duty_cycle(struct device *dev,
        return sprintf(buf, "%d\n", data->prochot_override & 0x0f);
 }
 
-static ssize_t store_prochot_override_duty_cycle(struct device *dev,
+static ssize_t prochot_override_duty_cycle_store(struct device *dev,
                                                struct device_attribute *attr,
                                                const char *buf, size_t count)
 {
@@ -2408,18 +2404,16 @@ static ssize_t store_prochot_override_duty_cycle(struct device *dev,
        return count;
 }
 
-static DEVICE_ATTR(prochot_override_duty_cycle, S_IRUGO | S_IWUSR,
-                       show_prochot_override_duty_cycle,
-                       store_prochot_override_duty_cycle);
+static DEVICE_ATTR_RW(prochot_override_duty_cycle);
 
-static ssize_t show_prochot_short(struct device *dev,
+static ssize_t prochot_short_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
        struct lm93_data *data = lm93_update_device(dev);
        return sprintf(buf, "%d\n", (data->config & 0x10) ? 1 : 0);
 }
 
-static ssize_t store_prochot_short(struct device *dev,
+static ssize_t prochot_short_store(struct device *dev,
                                        struct device_attribute *attr,
                                        const char *buf, size_t count)
 {
@@ -2442,8 +2436,7 @@ static ssize_t store_prochot_short(struct device *dev,
        return count;
 }
 
-static DEVICE_ATTR(prochot_short, S_IRUGO | S_IWUSR,
-                  show_prochot_short, store_prochot_short);
+static DEVICE_ATTR_RW(prochot_short);
 
 static ssize_t show_vrdhot(struct device *dev, struct device_attribute *attr,
                                char *buf)
@@ -2457,23 +2450,23 @@ static ssize_t show_vrdhot(struct device *dev, struct device_attribute *attr,
 static SENSOR_DEVICE_ATTR(vrdhot1, S_IRUGO, show_vrdhot, NULL, 0);
 static SENSOR_DEVICE_ATTR(vrdhot2, S_IRUGO, show_vrdhot, NULL, 1);
 
-static ssize_t show_gpio(struct device *dev, struct device_attribute *attr,
+static ssize_t gpio_show(struct device *dev, struct device_attribute *attr,
                                char *buf)
 {
        struct lm93_data *data = lm93_update_device(dev);
        return sprintf(buf, "%d\n", LM93_GPI_FROM_REG(data->gpi));
 }
 
-static DEVICE_ATTR(gpio, S_IRUGO, show_gpio, NULL);
+static DEVICE_ATTR_RO(gpio);
 
-static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
+static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
                                char *buf)
 {
        struct lm93_data *data = lm93_update_device(dev);
        return sprintf(buf, "%d\n", LM93_ALARMS_FROM_REG(data->block1));
 }
 
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
 
 static struct attribute *lm93_attrs[] = {
        &sensor_dev_attr_in1_input.dev_attr.attr,
index 8796de39ff9bbc07a2a66f9f33c30dcfa2e8d724..c7fcc9e7f57a233e2e823faa6ebaa01628641a53 100644 (file)
@@ -450,8 +450,8 @@ static ssize_t set_offset(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
-                            char *buf)
+static ssize_t update_interval_show(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
 {
        struct lm95234_data *data = dev_get_drvdata(dev);
        int ret = lm95234_update_device(data);
@@ -463,8 +463,9 @@ static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
                       DIV_ROUND_CLOSEST(data->interval * 1000, HZ));
 }
 
-static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
-                           const char *buf, size_t count)
+static ssize_t update_interval_store(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf, size_t count)
 {
        struct lm95234_data *data = dev_get_drvdata(dev);
        int ret = lm95234_update_device(data);
@@ -566,8 +567,7 @@ static SENSOR_DEVICE_ATTR(temp4_offset, S_IWUSR | S_IRUGO, show_offset,
 static SENSOR_DEVICE_ATTR(temp5_offset, S_IWUSR | S_IRUGO, show_offset,
                          set_offset, 3);
 
-static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval,
-                  set_interval);
+static DEVICE_ATTR_RW(update_interval);
 
 static struct attribute *lm95234_common_attrs[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
index 8445c9fd946b17ecc9d29f596bab47db8039facf..b904cb547ffb064bbeee204373a959f5a059c356 100644 (file)
@@ -215,6 +215,7 @@ static const struct of_device_id ltc4151_match[] = {
        { .compatible = "lltc,ltc4151" },
        {},
 };
+MODULE_DEVICE_TABLE(of, ltc4151_match);
 
 /* This is the driver that will be inserted */
 static struct i2c_driver ltc4151_driver = {
index 303d0c9df907a722e6d366b07a7f4c8d8a7c728b..8ddd4d69065214442716c943c82f5af87a73bb8a 100644 (file)
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(max1111_read_channel);
  * likely to be used by hwmon applications to distinguish between
  * different devices, explicitly add a name attribute here.
  */
-static ssize_t show_name(struct device *dev,
+static ssize_t name_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
 {
        return sprintf(buf, "%s\n", to_spi_device(dev)->modalias);
@@ -125,7 +125,7 @@ static ssize_t show_adc(struct device *dev,
 #define MAX1111_ADC_ATTR(_id)          \
        SENSOR_DEVICE_ATTR(in##_id##_input, S_IRUGO, show_adc, NULL, _id)
 
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
 static MAX1111_ADC_ATTR(0);
 static MAX1111_ADC_ATTR(1);
 static MAX1111_ADC_ATTR(2);
index eda9cf5996856f14553844aa1cec93d130f15a0a..a182789384948d2f08f6c11aee20b9fac7f8b20d 100644 (file)
@@ -173,7 +173,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
        return count;
 }
 
-static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
+static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
 {
        struct max1619_data *data = max1619_update_device(dev);
@@ -199,7 +199,7 @@ static SENSOR_DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp, set_temp,
 static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp,
                          set_temp, t_hyst2);
 
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
 static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 1);
 static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2);
 static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_alarm, NULL, 3);
index 07628569547aee23ffd60daceefb893101a14acb..638567fb7cd80c8c7f78d50c61b00ed13ba0f0ce 100644 (file)
@@ -207,8 +207,8 @@ unlock:
        return ret;
 }
 
-static ssize_t max197_show_name(struct device *dev,
-                               struct device_attribute *attr, char *buf)
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+                        char *buf)
 {
        struct platform_device *pdev = to_platform_device(dev);
        return sprintf(buf, "%s\n", pdev->name);
@@ -231,7 +231,7 @@ static ssize_t max197_show_name(struct device *dev,
        &sensor_dev_attr_in##chan##_max.dev_attr.attr,                  \
        &sensor_dev_attr_in##chan##_min.dev_attr.attr
 
-static DEVICE_ATTR(name, S_IRUGO, max197_show_name, NULL);
+static DEVICE_ATTR_RO(name);
 
 MAX197_SENSOR_DEVICE_ATTR_CH(0);
 MAX197_SENSOR_DEVICE_ATTR_CH(1);
index a993b44ed53849e001a94ffbc6ea574a9f915627..65be4b19fe47d8912b33a1dab20bd16e76578a8b 100644 (file)
@@ -270,8 +270,8 @@ static ssize_t get_fan(struct device *dev, struct device_attribute *devattr,
  * controlled.
  */
 
-static ssize_t get_target(struct device *dev, struct device_attribute *devattr,
-                        char *buf)
+static ssize_t fan1_target_show(struct device *dev,
+                               struct device_attribute *devattr, char *buf)
 {
        struct max6650_data *data = max6650_update_device(dev);
        int kscale, ktach, rpm;
@@ -318,8 +318,9 @@ static int max6650_set_target(struct max6650_data *data, unsigned long rpm)
                                         data->speed);
 }
 
-static ssize_t set_target(struct device *dev, struct device_attribute *devattr,
-                        const char *buf, size_t count)
+static ssize_t fan1_target_store(struct device *dev,
+                                struct device_attribute *devattr,
+                                const char *buf, size_t count)
 {
        struct max6650_data *data = dev_get_drvdata(dev);
        unsigned long rpm;
@@ -350,8 +351,8 @@ static ssize_t set_target(struct device *dev, struct device_attribute *devattr,
  * back exactly the value you have set.
  */
 
-static ssize_t get_pwm(struct device *dev, struct device_attribute *devattr,
-                      char *buf)
+static ssize_t pwm1_show(struct device *dev, struct device_attribute *devattr,
+                        char *buf)
 {
        int pwm;
        struct max6650_data *data = max6650_update_device(dev);
@@ -371,8 +372,9 @@ static ssize_t get_pwm(struct device *dev, struct device_attribute *devattr,
        return sprintf(buf, "%d\n", pwm);
 }
 
-static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
-                       const char *buf, size_t count)
+static ssize_t pwm1_store(struct device *dev,
+                         struct device_attribute *devattr, const char *buf,
+                         size_t count)
 {
        struct max6650_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
@@ -406,8 +408,8 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
  * 2 = Closed loop, RPM for all fans regulated by fan1 tachometer
  * 3 = Fan off
  */
-static ssize_t get_enable(struct device *dev, struct device_attribute *devattr,
-                         char *buf)
+static ssize_t pwm1_enable_show(struct device *dev,
+                               struct device_attribute *devattr, char *buf)
 {
        struct max6650_data *data = max6650_update_device(dev);
        int mode = (data->config & MAX6650_CFG_MODE_MASK) >> 4;
@@ -416,8 +418,9 @@ static ssize_t get_enable(struct device *dev, struct device_attribute *devattr,
        return sprintf(buf, "%d\n", sysfs_modes[mode]);
 }
 
-static ssize_t set_enable(struct device *dev, struct device_attribute *devattr,
-                         const char *buf, size_t count)
+static ssize_t pwm1_enable_store(struct device *dev,
+                                struct device_attribute *devattr,
+                                const char *buf, size_t count)
 {
        struct max6650_data *data = dev_get_drvdata(dev);
        unsigned long mode;
@@ -458,16 +461,17 @@ static ssize_t set_enable(struct device *dev, struct device_attribute *devattr,
  * defined for that. See the data sheet for details.
  */
 
-static ssize_t get_div(struct device *dev, struct device_attribute *devattr,
-                      char *buf)
+static ssize_t fan1_div_show(struct device *dev,
+                            struct device_attribute *devattr, char *buf)
 {
        struct max6650_data *data = max6650_update_device(dev);
 
        return sprintf(buf, "%d\n", DIV_FROM_REG(data->count));
 }
 
-static ssize_t set_div(struct device *dev, struct device_attribute *devattr,
-                      const char *buf, size_t count)
+static ssize_t fan1_div_store(struct device *dev,
+                             struct device_attribute *devattr,
+                             const char *buf, size_t count)
 {
        struct max6650_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
@@ -534,10 +538,10 @@ static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, get_fan, NULL, 0);
 static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, get_fan, NULL, 1);
 static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, get_fan, NULL, 2);
 static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, get_fan, NULL, 3);
-static DEVICE_ATTR(fan1_target, S_IWUSR | S_IRUGO, get_target, set_target);
-static DEVICE_ATTR(fan1_div, S_IWUSR | S_IRUGO, get_div, set_div);
-static DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, get_enable, set_enable);
-static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, get_pwm, set_pwm);
+static DEVICE_ATTR_RW(fan1_target);
+static DEVICE_ATTR_RW(fan1_div);
+static DEVICE_ATTR_RW(pwm1_enable);
+static DEVICE_ATTR_RW(pwm1);
 static SENSOR_DEVICE_ATTR(fan1_max_alarm, S_IRUGO, get_alarm, NULL,
                          MAX6650_ALRM_MAX);
 static SENSOR_DEVICE_ATTR(fan1_min_alarm, S_IRUGO, get_alarm, NULL,
index 0c02f40eb0c1baf28768bedbc57b0106a5bc0e39..960a1db6f269584337594e5df4689bbcba2d653b 100644 (file)
@@ -40,8 +40,8 @@ struct mc13783_adc_priv {
        char name[PLATFORM_NAME_SIZE];
 };
 
-static ssize_t mc13783_adc_show_name(struct device *dev, struct device_attribute
-                             *devattr, char *buf)
+static ssize_t name_show(struct device *dev, struct device_attribute *devattr,
+                        char *buf)
 {
        struct mc13783_adc_priv *priv = dev_get_drvdata(dev);
 
@@ -111,7 +111,7 @@ static ssize_t mc13783_adc_read_gp(struct device *dev,
        return sprintf(buf, "%u\n", val);
 }
 
-static DEVICE_ATTR(name, S_IRUGO, mc13783_adc_show_name, NULL);
+static DEVICE_ATTR_RO(name);
 static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, mc13783_adc_read_bp, NULL, 2);
 static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, mc13783_adc_read_gp, NULL, 5);
 static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, mc13783_adc_read_gp, NULL, 6);
index 1929734c3b1d9e52a5c870093acc80fcc44101c0..de886f82101bb86a2d983d3f9b7dc39ed2357733 100644 (file)
@@ -86,8 +86,8 @@ static inline u16 volts_from_reg(struct mcp3021_data *data, u16 val)
        return DIV_ROUND_CLOSEST(data->vdd * val, 1 << data->output_res);
 }
 
-static ssize_t show_in_input(struct device *dev, struct device_attribute *attr,
-               char *buf)
+static ssize_t in0_input_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct mcp3021_data *data = i2c_get_clientdata(client);
@@ -102,7 +102,7 @@ static ssize_t show_in_input(struct device *dev, struct device_attribute *attr,
        return sprintf(buf, "%d\n", in_input);
 }
 
-static DEVICE_ATTR(in0_input, 0444, show_in_input, NULL);
+static DEVICE_ATTR_RO(in0_input);
 
 static int mcp3021_probe(struct i2c_client *client,
                                const struct i2c_device_id *id)
index 559c596b24f9b916d9f1fe6b85c7eabc8f1868d9..8b0bc4fc06e8ccccdd94029bcb541b436d473469 100644 (file)
@@ -979,7 +979,7 @@ static const struct sensor_template_group nct6683_pwm_template_group = {
 };
 
 static ssize_t
-show_global_beep(struct device *dev, struct device_attribute *attr, char *buf)
+beep_enable_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct nct6683_data *data = dev_get_drvdata(dev);
        int ret;
@@ -1004,7 +1004,7 @@ error:
 }
 
 static ssize_t
-store_global_beep(struct device *dev, struct device_attribute *attr,
+beep_enable_store(struct device *dev, struct device_attribute *attr,
                  const char *buf, size_t count)
 {
        struct nct6683_data *data = dev_get_drvdata(dev);
@@ -1039,7 +1039,8 @@ error:
 /* Case open detection */
 
 static ssize_t
-show_caseopen(struct device *dev, struct device_attribute *attr, char *buf)
+intrusion0_alarm_show(struct device *dev, struct device_attribute *attr,
+                     char *buf)
 {
        struct nct6683_data *data = dev_get_drvdata(dev);
        int ret;
@@ -1064,8 +1065,8 @@ error:
 }
 
 static ssize_t
-clear_caseopen(struct device *dev, struct device_attribute *attr,
-              const char *buf, size_t count)
+intrusion0_alarm_store(struct device *dev, struct device_attribute *attr,
+                      const char *buf, size_t count)
 {
        struct nct6683_data *data = dev_get_drvdata(dev);
        unsigned long val;
@@ -1102,10 +1103,8 @@ error:
        return count;
 }
 
-static DEVICE_ATTR(intrusion0_alarm, S_IWUSR | S_IRUGO, show_caseopen,
-                  clear_caseopen);
-static DEVICE_ATTR(beep_enable, S_IWUSR | S_IRUGO, show_global_beep,
-                  store_global_beep);
+static DEVICE_ATTR_RW(intrusion0_alarm);
+static DEVICE_ATTR_RW(beep_enable);
 
 static struct attribute *nct6683_attributes_other[] = {
        &dev_attr_intrusion0_alarm.attr,
index ce75dd4db7eb4618e1a3adeb31cd41eff6808287..2458b406f6aa27804f42a1171c82c5c763aed70c 100644 (file)
@@ -3127,14 +3127,14 @@ static const struct sensor_template_group nct6775_pwm_template_group = {
 };
 
 static ssize_t
-show_vid(struct device *dev, struct device_attribute *attr, char *buf)
+cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct nct6775_data *data = dev_get_drvdata(dev);
 
        return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
 }
 
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
 
 /* Case open detection */
 
index 0517a265741f16c2ef8fdaf7091519ea57979558..5a16109cdea85527ec235018e80cd7bb455c7a02 100644 (file)
@@ -122,8 +122,8 @@ static ssize_t show_label(struct device *dev,
        return sprintf(buf, "%s\n", nsa320_input_names[channel]);
 }
 
-static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
-                        char *buf)
+static ssize_t temp1_input_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
 {
        s32 mcu_data = nsa320_hwmon_update(dev);
 
@@ -133,8 +133,8 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
        return sprintf(buf, "%d\n", (mcu_data & 0xffff) * 100);
 }
 
-static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
-                       char *buf)
+static ssize_t fan1_input_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
 {
        s32 mcu_data = nsa320_hwmon_update(dev);
 
@@ -145,9 +145,9 @@ static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
 }
 
 static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_label, NULL, NSA320_TEMP);
-static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL);
+static DEVICE_ATTR_RO(temp1_input);
 static SENSOR_DEVICE_ATTR(fan1_label, S_IRUGO, show_label, NULL, NSA320_FAN);
-static DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL);
+static DEVICE_ATTR_RO(fan1_input);
 
 static struct attribute *nsa320_attrs[] = {
        &sensor_dev_attr_temp1_label.dev_attr.attr,
index d50fbf93a7370784902266b2e3185a02f3fea872..7e369772753792f4b95930620ec559d02776bc7c 100644 (file)
@@ -589,22 +589,22 @@ static struct sensor_device_attribute in_max_alarm[] = {
        &in_min_alarm[X].dev_attr.attr, \
        &in_max_alarm[X].dev_attr.attr
 
-static ssize_t show_vid(struct device *dev, struct device_attribute *attr,
-                       char *buf)
+static ssize_t cpu0_vid_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
 {
        struct pc87360_data *data = pc87360_update_device(dev);
        return sprintf(buf, "%u\n", vid_from_reg(data->vid, data->vrm));
 }
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
 
-static ssize_t show_vrm(struct device *dev, struct device_attribute *attr,
+static ssize_t vrm_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
 {
        struct pc87360_data *data = dev_get_drvdata(dev);
        return sprintf(buf, "%u\n", data->vrm);
 }
-static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
-                      const char *buf, size_t count)
+static ssize_t vrm_store(struct device *dev, struct device_attribute *attr,
+                        const char *buf, size_t count)
 {
        struct pc87360_data *data = dev_get_drvdata(dev);
        unsigned long val;
@@ -620,15 +620,15 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
        data->vrm = val;
        return count;
 }
-static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm, set_vrm);
+static DEVICE_ATTR_RW(vrm);
 
-static ssize_t show_in_alarms(struct device *dev,
+static ssize_t alarms_in_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
 {
        struct pc87360_data *data = pc87360_update_device(dev);
        return sprintf(buf, "%u\n", data->in_alarms);
 }
-static DEVICE_ATTR(alarms_in, S_IRUGO, show_in_alarms, NULL);
+static DEVICE_ATTR_RO(alarms_in);
 
 static struct attribute *pc8736x_vin_attr_array[] = {
        VIN_UNIT_ATTRS(0),
@@ -1006,14 +1006,14 @@ static struct sensor_device_attribute temp_crit[] = {
                    show_temp_crit, set_temp_crit, 2),
 };
 
-static ssize_t show_temp_alarms(struct device *dev,
+static ssize_t alarms_temp_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
        struct pc87360_data *data = pc87360_update_device(dev);
        return sprintf(buf, "%u\n", data->temp_alarms);
 }
 
-static DEVICE_ATTR(alarms_temp, S_IRUGO, show_temp_alarms, NULL);
+static DEVICE_ATTR_RO(alarms_temp);
 
 /*
  * show_temp_min/max_alarm() reads data from the per-channel status
@@ -1106,14 +1106,14 @@ static const struct attribute_group pc8736x_temp_attr_group[] = {
        { .attrs = pc8736x_temp_attr[2] }
 };
 
-static ssize_t show_name(struct device *dev,
+static ssize_t name_show(struct device *dev,
                        struct device_attribute *devattr, char *buf)
 {
        struct pc87360_data *data = dev_get_drvdata(dev);
        return sprintf(buf, "%s\n", data->name);
 }
 
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
 
 /*
  * Device detection, registration and update
index cb9fdd37bd0d9c5111a4481255022e4ec26e5d4c..dc5a9d5ada516c840f73ee7f33971ec1bcdddcfc 100644 (file)
@@ -943,14 +943,14 @@ static const struct attribute_group pc87427_group_temp[6] = {
        { .attrs = pc87427_attributes_temp[5] },
 };
 
-static ssize_t show_name(struct device *dev, struct device_attribute
+static ssize_t name_show(struct device *dev, struct device_attribute
                         *devattr, char *buf)
 {
        struct pc87427_data *data = dev_get_drvdata(dev);
 
        return sprintf(buf, "%s\n", data->name);
 }
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
 
 
 /*
index 5740888c62426b3a45b8b8181a78253123944779..60e25c85e71cd88e667b8d75367b6139a5c79b28 100644 (file)
@@ -103,16 +103,16 @@ show_in_channel(1);
 show_in_channel(2);
 show_in_channel(3);
 
-static ssize_t show_out0_ouput(struct device *dev,
-                              struct device_attribute *attr, char *buf)
+static ssize_t out0_output_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
 {
        struct pcf8591_data *data = i2c_get_clientdata(to_i2c_client(dev));
        return sprintf(buf, "%d\n", data->aout * 10);
 }
 
-static ssize_t set_out0_output(struct device *dev,
-                              struct device_attribute *attr,
-                              const char *buf, size_t count)
+static ssize_t out0_output_store(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t count)
 {
        unsigned long val;
        struct i2c_client *client = to_i2c_client(dev);
@@ -132,19 +132,18 @@ static ssize_t set_out0_output(struct device *dev,
        return count;
 }
 
-static DEVICE_ATTR(out0_output, S_IWUSR | S_IRUGO,
-                  show_out0_ouput, set_out0_output);
+static DEVICE_ATTR_RW(out0_output);
 
-static ssize_t show_out0_enable(struct device *dev,
+static ssize_t out0_enable_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
        struct pcf8591_data *data = i2c_get_clientdata(to_i2c_client(dev));
        return sprintf(buf, "%u\n", !(!(data->control & PCF8591_CONTROL_AOEF)));
 }
 
-static ssize_t set_out0_enable(struct device *dev,
-                              struct device_attribute *attr,
-                              const char *buf, size_t count)
+static ssize_t out0_enable_store(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t count)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct pcf8591_data *data = i2c_get_clientdata(client);
@@ -165,8 +164,7 @@ static ssize_t set_out0_enable(struct device *dev,
        return count;
 }
 
-static DEVICE_ATTR(out0_enable, S_IWUSR | S_IRUGO,
-                  show_out0_enable, set_out0_enable);
+static DEVICE_ATTR_RW(out0_enable);
 
 static struct attribute *pcf8591_attributes[] = {
        &dev_attr_out0_enable.attr,
index 19f85c0da2709fa2ebedad3d901ec799bce77129..91544f2312e6d4ef51fe244b187b6fd7e11edc91 100644 (file)
@@ -205,7 +205,7 @@ static int reg_to_rpm(u16 reg)
        return 5400540 / reg;
 }
 
-static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
+static ssize_t name_show(struct device *dev, struct device_attribute *devattr,
        char *buf)
 {
        return snprintf(buf, PAGE_SIZE, "%s\n", DEVNAME);
@@ -326,7 +326,7 @@ static ssize_t show_in_label(struct device *dev, struct device_attribute
                        SCH5627_IN_LABELS[attr->index]);
 }
 
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
 static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
 static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2);
index 68c350c704fba85ff517f0d5f726ed53d367e45d..bda3d52855861530981704d7e68ad9ef1cb1dc1c 100644 (file)
@@ -28,7 +28,6 @@
 #include <linux/delay.h>
 #include <linux/fs.h>
 #include <linux/watchdog.h>
-#include <linux/miscdevice.h>
 #include <linux/uaccess.h>
 #include <linux/slab.h>
 #include "sch56xx-common.h"
index a2fdbb7d20edcc92f9a1307f4f7105475b41e7ca..e4d642b673c6d9e5dfebb7bc23360f4d8f46fb3e 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/slab.h>
 #include <linux/atomic.h>
 #include <linux/bitrev.h>
+#include <linux/of_gpio.h>
 
 /* Commands */
 #define SHT15_MEASURE_TEMP             0x03
@@ -769,7 +770,7 @@ static ssize_t sht15_show_humidity(struct device *dev,
        return ret ? ret : sprintf(buf, "%d\n", sht15_calc_humid(data));
 }
 
-static ssize_t show_name(struct device *dev,
+static ssize_t name_show(struct device *dev,
                         struct device_attribute *attr,
                         char *buf)
 {
@@ -787,7 +788,7 @@ static SENSOR_DEVICE_ATTR(humidity1_fault, S_IRUGO, sht15_show_status, NULL,
                          SHT15_STATUS_LOW_BATTERY);
 static SENSOR_DEVICE_ATTR(heater_enable, S_IRUGO | S_IWUSR, sht15_show_status,
                          sht15_store_heater, SHT15_STATUS_HEATER);
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
 static struct attribute *sht15_attrs[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        &sensor_dev_attr_humidity1_input.dev_attr.attr,
@@ -911,6 +912,54 @@ static int sht15_invalidate_voltage(struct notifier_block *nb,
        return NOTIFY_OK;
 }
 
+#ifdef CONFIG_OF
+static const struct of_device_id sht15_dt_match[] = {
+       { .compatible = "sensirion,sht15" },
+       { },
+};
+MODULE_DEVICE_TABLE(of, sht15_dt_match);
+
+/*
+ * This function returns NULL if pdev isn't a device instatiated by dt,
+ * a pointer to pdata if it could successfully get all information
+ * from dt or a negative ERR_PTR() on error.
+ */
+static struct sht15_platform_data *sht15_probe_dt(struct device *dev)
+{
+       struct device_node *np = dev->of_node;
+       struct sht15_platform_data *pdata;
+
+       /* no device tree device */
+       if (!np)
+               return NULL;
+
+       pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+       if (!pdata)
+               return ERR_PTR(-ENOMEM);
+
+       pdata->gpio_data = of_get_named_gpio(np, "data-gpios", 0);
+       if (pdata->gpio_data < 0) {
+               if (pdata->gpio_data != -EPROBE_DEFER)
+                       dev_err(dev, "data-gpios not found\n");
+               return ERR_PTR(pdata->gpio_data);
+       }
+
+       pdata->gpio_sck = of_get_named_gpio(np, "clk-gpios", 0);
+       if (pdata->gpio_sck < 0) {
+               if (pdata->gpio_sck != -EPROBE_DEFER)
+                       dev_err(dev, "clk-gpios not found\n");
+               return ERR_PTR(pdata->gpio_sck);
+       }
+
+       return pdata;
+}
+#else
+static inline struct sht15_platform_data *sht15_probe_dt(struct device *dev)
+{
+       return NULL;
+}
+#endif
+
 static int sht15_probe(struct platform_device *pdev)
 {
        int ret;
@@ -928,11 +977,17 @@ static int sht15_probe(struct platform_device *pdev)
        data->dev = &pdev->dev;
        init_waitqueue_head(&data->wait_queue);
 
-       if (dev_get_platdata(&pdev->dev) == NULL) {
-               dev_err(&pdev->dev, "no platform data supplied\n");
-               return -EINVAL;
+       data->pdata = sht15_probe_dt(&pdev->dev);
+       if (IS_ERR(data->pdata))
+               return PTR_ERR(data->pdata);
+       if (data->pdata == NULL) {
+               data->pdata = dev_get_platdata(&pdev->dev);
+               if (data->pdata == NULL) {
+                       dev_err(&pdev->dev, "no platform data supplied\n");
+                       return -EINVAL;
+               }
        }
-       data->pdata = dev_get_platdata(&pdev->dev);
+
        data->supply_uv = data->pdata->supply_mv * 1000;
        if (data->pdata->checksum)
                data->checksumming = true;
@@ -1075,6 +1130,7 @@ MODULE_DEVICE_TABLE(platform, sht15_device_ids);
 static struct platform_driver sht15_driver = {
        .driver = {
                .name = "sht15",
+               .of_match_table = of_match_ptr(sht15_dt_match),
        },
        .probe = sht15_probe,
        .remove = sht15_remove,
index 84cdb1cf0fb42ac7e36098086bf24a6c7b0b96cb..06706d288355b8e13a794579b06a13b72852b0bb 100644 (file)
 /* I2C command bytes */
 #define SHT21_TRIG_T_MEASUREMENT_HM  0xe3
 #define SHT21_TRIG_RH_MEASUREMENT_HM 0xe5
+#define SHT21_READ_SNB_CMD1 0xFA
+#define SHT21_READ_SNB_CMD2 0x0F
+#define SHT21_READ_SNAC_CMD1 0xFC
+#define SHT21_READ_SNAC_CMD2 0xC9
 
 /**
  * struct sht21 - SHT21 device specific data
  * @hwmon_dev: device registered with hwmon
  * @lock: mutex to protect measurement values
- * @valid: only 0 before first measurement is taken
  * @last_update: time of last update (jiffies)
  * @temperature: cached temperature measurement value
  * @humidity: cached humidity measurement value
+ * @valid: only 0 before first measurement is taken
+ * @eic: cached electronic identification code text
  */
 struct sht21 {
        struct i2c_client *client;
        struct mutex lock;
-       char valid;
        unsigned long last_update;
        int temperature;
        int humidity;
+       char valid;
+       char eic[18];
 };
 
 /**
@@ -165,15 +171,97 @@ static ssize_t sht21_show_humidity(struct device *dev,
        return sprintf(buf, "%d\n", sht21->humidity);
 }
 
+static ssize_t eic_read(struct sht21 *sht21)
+{
+       struct i2c_client *client = sht21->client;
+       u8 tx[2];
+       u8 rx[8];
+       u8 eic[8];
+       struct i2c_msg msgs[2] = {
+               {
+                       .addr = client->addr,
+                       .flags = 0,
+                       .len = 2,
+                       .buf = tx,
+               },
+               {
+                       .addr = client->addr,
+                       .flags = I2C_M_RD,
+                       .len = 8,
+                       .buf = rx,
+               },
+       };
+       int ret;
+
+       tx[0] = SHT21_READ_SNB_CMD1;
+       tx[1] = SHT21_READ_SNB_CMD2;
+       ret = i2c_transfer(client->adapter, msgs, 2);
+       if (ret < 0)
+               goto out;
+       eic[2] = rx[0];
+       eic[3] = rx[2];
+       eic[4] = rx[4];
+       eic[5] = rx[6];
+
+       tx[0] = SHT21_READ_SNAC_CMD1;
+       tx[1] = SHT21_READ_SNAC_CMD2;
+       msgs[1].len = 6;
+       ret = i2c_transfer(client->adapter, msgs, 2);
+       if (ret < 0)
+               goto out;
+       eic[0] = rx[3];
+       eic[1] = rx[4];
+       eic[6] = rx[0];
+       eic[7] = rx[1];
+
+       ret = snprintf(sht21->eic, sizeof(sht21->eic),
+                      "%02x%02x%02x%02x%02x%02x%02x%02x\n",
+                      eic[0], eic[1], eic[2], eic[3],
+                      eic[4], eic[5], eic[6], eic[7]);
+out:
+       if (ret < 0)
+               sht21->eic[0] = 0;
+
+       return ret;
+}
+
+/**
+ * eic_show() - show Electronic Identification Code in sysfs
+ * @dev: device
+ * @attr: device attribute
+ * @buf: sysfs buffer (PAGE_SIZE) where EIC is written
+ *
+ * Will be called on read access to eic sysfs attribute.
+ * Returns number of bytes written into buffer, negative errno on error.
+ */
+static ssize_t eic_show(struct device *dev,
+       struct device_attribute *attr,
+       char *buf)
+{
+       struct sht21 *sht21 = dev_get_drvdata(dev);
+       int ret;
+
+       ret = sizeof(sht21->eic) - 1;
+       mutex_lock(&sht21->lock);
+       if (!sht21->eic[0])
+               ret = eic_read(sht21);
+       if (ret > 0)
+               memcpy(buf, sht21->eic, ret);
+       mutex_unlock(&sht21->lock);
+       return ret;
+}
+
 /* sysfs attributes */
 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, sht21_show_temperature,
        NULL, 0);
 static SENSOR_DEVICE_ATTR(humidity1_input, S_IRUGO, sht21_show_humidity,
        NULL, 0);
+static DEVICE_ATTR_RO(eic);
 
 static struct attribute *sht21_attrs[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        &sensor_dev_attr_humidity1_input.dev_attr.attr,
+       &dev_attr_eic.attr,
        NULL
 };
 
index 45a028fb88517619c19b1f9b5fe6350f3f2b93ca..6d789aab54c9852c0121b96b989b172c1821c167 100644 (file)
@@ -304,22 +304,23 @@ show_in_offset(3);
 show_in_offset(4);
 
 /* Temperature */
-static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
-                        char *buf)
+static ssize_t temp1_input_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
 {
        struct sis5595_data *data = sis5595_update_device(dev);
        return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp));
 }
 
-static ssize_t show_temp_over(struct device *dev, struct device_attribute *attr,
+static ssize_t temp1_max_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
 {
        struct sis5595_data *data = sis5595_update_device(dev);
        return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_over));
 }
 
-static ssize_t set_temp_over(struct device *dev, struct device_attribute *attr,
-                            const char *buf, size_t count)
+static ssize_t temp1_max_store(struct device *dev,
+                              struct device_attribute *attr, const char *buf,
+                              size_t count)
 {
        struct sis5595_data *data = dev_get_drvdata(dev);
        long val;
@@ -336,15 +337,16 @@ static ssize_t set_temp_over(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static ssize_t show_temp_hyst(struct device *dev, struct device_attribute *attr,
-                             char *buf)
+static ssize_t temp1_max_hyst_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
 {
        struct sis5595_data *data = sis5595_update_device(dev);
        return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_hyst));
 }
 
-static ssize_t set_temp_hyst(struct device *dev, struct device_attribute *attr,
-                            const char *buf, size_t count)
+static ssize_t temp1_max_hyst_store(struct device *dev,
+                                   struct device_attribute *attr,
+                                   const char *buf, size_t count)
 {
        struct sis5595_data *data = dev_get_drvdata(dev);
        long val;
@@ -361,11 +363,9 @@ static ssize_t set_temp_hyst(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL);
-static DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR,
-               show_temp_over, set_temp_over);
-static DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR,
-               show_temp_hyst, set_temp_hyst);
+static DEVICE_ATTR_RO(temp1_input);
+static DEVICE_ATTR_RW(temp1_max);
+static DEVICE_ATTR_RW(temp1_max_hyst);
 
 /* 2 Fans */
 static ssize_t show_fan(struct device *dev, struct device_attribute *da,
@@ -492,13 +492,13 @@ show_fan_offset(1);
 show_fan_offset(2);
 
 /* Alarms */
-static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
+static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
 {
        struct sis5595_data *data = sis5595_update_device(dev);
        return sprintf(buf, "%d\n", data->alarms);
 }
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
 
 static ssize_t show_alarm(struct device *dev, struct device_attribute *da,
                          char *buf)
@@ -516,13 +516,13 @@ static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
 static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
 static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 15);
 
-static ssize_t show_name(struct device *dev, struct device_attribute *attr,
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
 {
        struct sis5595_data *data = dev_get_drvdata(dev);
        return sprintf(buf, "%s\n", data->name);
 }
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
 
 static struct attribute *sis5595_attributes[] = {
        &sensor_dev_attr_in0_input.dev_attr.attr,
index 5d323186d2c10be446db19325b410e9aa384cfd3..c7b6a425e2c0270f5b20a7b09241a0eadce94ce8 100644 (file)
@@ -264,8 +264,8 @@ static ssize_t get_pwm_en(struct device *dev, struct device_attribute
        return sprintf(buf, "%d\n", PWM_EN_FROM_REG(data->pwm[attr->index]));
 }
 
-static ssize_t get_alarms(struct device *dev, struct device_attribute
-                         *devattr, char *buf)
+static ssize_t alarms_show(struct device *dev,
+                          struct device_attribute *devattr, char *buf)
 {
        struct smsc47m1_data *data = smsc47m1_update_device(dev, 0);
        return sprintf(buf, "%d\n", data->alarms);
@@ -440,16 +440,16 @@ fan_present(1);
 fan_present(2);
 fan_present(3);
 
-static DEVICE_ATTR(alarms, S_IRUGO, get_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
 
-static ssize_t show_name(struct device *dev, struct device_attribute
+static ssize_t name_show(struct device *dev, struct device_attribute
                         *devattr, char *buf)
 {
        struct smsc47m1_data *data = dev_get_drvdata(dev);
 
        return sprintf(buf, "%s\n", data->name);
 }
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
 
 static struct attribute *smsc47m1_attributes_fan1[] = {
        &sensor_dev_attr_fan1_input.dev_attr.attr,
index 15650f2476791192d4025a297e052e8d36c9fa80..6989408033ec4b4c5e5fc0ac02623be568d59e02 100644 (file)
@@ -400,23 +400,23 @@ show_temp_index(2)
 show_temp_index(3)
 
 /* VID */
-static ssize_t show_vid(struct device *dev, struct device_attribute *attr,
-               char *buf)
+static ssize_t cpu0_vid_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
 {
        struct smsc47m192_data *data = smsc47m192_update_device(dev);
        return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
 }
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
 
-static ssize_t show_vrm(struct device *dev, struct device_attribute *attr,
+static ssize_t vrm_show(struct device *dev, struct device_attribute *attr,
                char *buf)
 {
        struct smsc47m192_data *data = dev_get_drvdata(dev);
        return sprintf(buf, "%d\n", data->vrm);
 }
 
-static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
-               const char *buf, size_t count)
+static ssize_t vrm_store(struct device *dev, struct device_attribute *attr,
+                        const char *buf, size_t count)
 {
        struct smsc47m192_data *data = dev_get_drvdata(dev);
        unsigned long val;
@@ -431,7 +431,7 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
        data->vrm = val;
        return count;
 }
-static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm, set_vrm);
+static DEVICE_ATTR_RW(vrm);
 
 /* Alarms */
 static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/hwmon/stts751.c b/drivers/hwmon/stts751.c
new file mode 100644 (file)
index 0000000..5545068
--- /dev/null
@@ -0,0 +1,834 @@
+/*
+ * STTS751 sensor driver
+ *
+ * Copyright (C) 2016-2017 Istituto Italiano di Tecnologia - RBCS - EDL
+ * Robotics, Brain and Cognitive Sciences department
+ * Electronic Design Laboratory
+ *
+ * Written by Andrea Merello <andrea.merello@gmail.com>
+ *
+ * Based on  LM95241 driver and LM90 driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/util_macros.h>
+
+#define DEVNAME "stts751"
+
+static const unsigned short normal_i2c[] = {
+       0x48, 0x49, 0x38, 0x39,  /* STTS751-0 */
+       0x4A, 0x4B, 0x3A, 0x3B,  /* STTS751-1 */
+       I2C_CLIENT_END };
+
+#define STTS751_REG_TEMP_H     0x00
+#define STTS751_REG_STATUS     0x01
+#define STTS751_STATUS_TRIPT   BIT(0)
+#define STTS751_STATUS_TRIPL   BIT(5)
+#define STTS751_STATUS_TRIPH   BIT(6)
+#define STTS751_REG_TEMP_L     0x02
+#define STTS751_REG_CONF       0x03
+#define STTS751_CONF_RES_MASK  0x0C
+#define STTS751_CONF_RES_SHIFT  2
+#define STTS751_CONF_EVENT_DIS  BIT(7)
+#define STTS751_CONF_STOP      BIT(6)
+#define STTS751_REG_RATE       0x04
+#define STTS751_REG_HLIM_H     0x05
+#define STTS751_REG_HLIM_L     0x06
+#define STTS751_REG_LLIM_H     0x07
+#define STTS751_REG_LLIM_L     0x08
+#define STTS751_REG_TLIM       0x20
+#define STTS751_REG_HYST       0x21
+#define STTS751_REG_SMBUS_TO   0x22
+
+#define STTS751_REG_PROD_ID    0xFD
+#define STTS751_REG_MAN_ID     0xFE
+#define STTS751_REG_REV_ID     0xFF
+
+#define STTS751_0_PROD_ID      0x00
+#define STTS751_1_PROD_ID      0x01
+#define ST_MAN_ID              0x53
+
+/*
+ * Possible update intervals are (in ms):
+ * 16000, 8000, 4000, 2000, 1000, 500, 250, 125, 62.5, 31.25
+ * However, we do not want to complicate things too much, so we stick to
+ * the approximate value in ms.
+ */
+static const int stts751_intervals[] = {
+       16000, 8000, 4000, 2000, 1000, 500, 250, 125, 63, 31
+};
+
+static const struct i2c_device_id stts751_id[] = {
+       { "stts751", 0 },
+       { }
+};
+
+/* Driver private data; mutable state is protected by access_lock */
+struct stts751_priv {
+       struct device *dev;             /* hwmon device */
+       struct i2c_client *client;      /* I2C client used for register I/O */
+       struct mutex access_lock;       /* serializes HW access and cached state */
+       u8 interval;                    /* index into stts751_intervals[] */
+       int res;                        /* current resolution field (register value) */
+       int event_max, event_min;       /* cached event high/low limits, mC */
+       int therm;                      /* cached therm (crit) limit, mC */
+       int hyst;                       /* cached hysteresis temperature, mC */
+       bool smbus_timeout;             /* NOTE(review): not referenced in this file */
+       int temp;                       /* last temperature reading, mC */
+       unsigned long last_update, last_alert_update;   /* cache timestamps, jiffies */
+       u8 config;                      /* shadow copy of the config register */
+       bool min_alert, max_alert, therm_trip;  /* latched alarm state */
+       bool data_valid, alert_valid;   /* caches have been written at least once */
+       bool notify_max, notify_min;    /* log alert only once until re-armed via sysfs */
+};
+
+/*
+ * These functions convert temperature between HW format and integer format
+ * and vice-versa. They are (mostly) taken from the lm90 driver. Unit is mC.
+ */
+/* Convert a raw 16-bit sensor reading to milli-degrees Celsius */
+static int stts751_to_deg(s16 hw_val)
+{
+       return hw_val * 125 / 32;
+}
+
+/* Convert milli-degrees Celsius to the raw 16-bit register format */
+static s32 stts751_to_hw(int val)
+{
+       return DIV_ROUND_CLOSEST(val, 125) * 32;
+}
+
+/*
+ * Select the highest ADC resolution compatible with the current conversion
+ * rate (the two fastest rates cannot use 12 bits) and, if it changed,
+ * program it into the config register. Returns 0 or a negative errno.
+ */
+static int stts751_adjust_resolution(struct stts751_priv *priv)
+{
+       u8 res;
+
+       switch (priv->interval) {
+       case 9:
+               /* 10 bits */
+               res = 0;
+               break;
+       case 8:
+               /* 11 bits */
+               res = 1;
+               break;
+       default:
+               /* 12 bits */
+               res = 3;
+               break;
+       }
+
+       if (priv->res == res)
+               return 0;
+
+       priv->config &= ~STTS751_CONF_RES_MASK;
+       priv->config |= res << STTS751_CONF_RES_SHIFT;
+       dev_dbg(&priv->client->dev, "setting res %d. config %x",
+               res, priv->config);
+       priv->res = res;
+
+       return i2c_smbus_write_byte_data(priv->client,
+                               STTS751_REG_CONF, priv->config);
+}
+
+/*
+ * Read the current temperature into priv->temp (mC).
+ * Returns 0 on success, a negative errno on I2C failure.
+ */
+static int stts751_update_temp(struct stts751_priv *priv)
+{
+       s32 integer1, integer2, frac;
+
+       /*
+        * There is a trick here, like in the lm90 driver. We have to read two
+        * registers to get the sensor temperature, but we have to beware a
+        * conversion could occur between the readings. We could use the
+        * one-shot conversion register, but we don't want to do this (disables
+        * hardware monitoring). So the solution used here is to read the high
+        * byte once, then the low byte, then the high byte again. If the new
+        * high byte matches the old one, then we have a valid reading. Else we
+        * have to read the low byte again, and now we believe we have a correct
+        * reading.
+        */
+       integer1 = i2c_smbus_read_byte_data(priv->client, STTS751_REG_TEMP_H);
+       if (integer1 < 0) {
+               dev_dbg(&priv->client->dev,
+                       "I2C read failed (temp H). ret: %x\n", integer1);
+               return integer1;
+       }
+
+       frac = i2c_smbus_read_byte_data(priv->client, STTS751_REG_TEMP_L);
+       if (frac < 0) {
+               dev_dbg(&priv->client->dev,
+                       "I2C read failed (temp L). ret: %x\n", frac);
+               return frac;
+       }
+
+       integer2 = i2c_smbus_read_byte_data(priv->client, STTS751_REG_TEMP_H);
+       if (integer2 < 0) {
+               dev_dbg(&priv->client->dev,
+                       "I2C 2nd read failed (temp H). ret: %x\n", integer2);
+               return integer2;
+       }
+
+       if (integer1 != integer2) {
+               frac = i2c_smbus_read_byte_data(priv->client,
+                                               STTS751_REG_TEMP_L);
+               if (frac < 0) {
+                       dev_dbg(&priv->client->dev,
+                               "I2C 2nd read failed (temp L). ret: %x\n",
+                               frac);
+                       return frac;
+               }
+       }
+
+       priv->temp = stts751_to_deg((integer1 << 8) | frac);
+       return 0;
+}
+
+/* Write temperature limit @temp (mC) to the @hreg/@lreg register pair */
+static int stts751_set_temp_reg16(struct stts751_priv *priv, int temp,
+                                 u8 hreg, u8 lreg)
+{
+       s32 hwval;
+       int ret;
+
+       hwval = stts751_to_hw(temp);
+
+       ret = i2c_smbus_write_byte_data(priv->client, hreg, hwval >> 8);
+       if (ret)
+               return ret;
+
+       return i2c_smbus_write_byte_data(priv->client, lreg, hwval & 0xff);
+}
+
+/* Write temperature limit @temp (mC) to the single-byte (high byte) register @reg */
+static int stts751_set_temp_reg8(struct stts751_priv *priv, int temp, u8 reg)
+{
+       s32 hwval;
+
+       hwval = stts751_to_hw(temp);
+       return i2c_smbus_write_byte_data(priv->client, reg, hwval >> 8);
+}
+
+/* Read the @hreg/@lreg register pair and store the temperature (mC) in *@temp */
+static int stts751_read_reg16(struct stts751_priv *priv, int *temp,
+                             u8 hreg, u8 lreg)
+{
+       int integer, frac;
+
+       integer = i2c_smbus_read_byte_data(priv->client, hreg);
+       if (integer < 0)
+               return integer;
+
+       frac = i2c_smbus_read_byte_data(priv->client, lreg);
+       if (frac < 0)
+               return frac;
+
+       *temp = stts751_to_deg((integer << 8) | frac);
+
+       return 0;
+}
+
+/* Read single-byte register @reg into *@temp (mC); the low byte is taken as 0 */
+static int stts751_read_reg8(struct stts751_priv *priv, int *temp, u8 reg)
+{
+       int integer;
+
+       integer = i2c_smbus_read_byte_data(priv->client, reg);
+       if (integer < 0)
+               return integer;
+
+       *temp = stts751_to_deg(integer << 8);
+
+       return 0;
+}
+
+/*
+ * Update alert flags without waiting for the cache to expire. We detect
+ * alerts immediately for the sake of the alert handler; we still need to
+ * deal with caching to work around the fact that the alarm flags in the
+ * status register, despite what the datasheet claims, always get cleared
+ * on read.
+ */
+/* Caller must hold ->access_lock (all call sites in this file do) */
+static int stts751_update_alert(struct stts751_priv *priv)
+{
+       int ret;
+       bool conv_done;
+       int cache_time = msecs_to_jiffies(stts751_intervals[priv->interval]);
+
+       /*
+        * Add another 10% because if we run faster than the HW conversion
+        * rate we will end up in reporting incorrectly alarms.
+        */
+       cache_time += cache_time / 10;
+
+       ret = i2c_smbus_read_byte_data(priv->client, STTS751_REG_STATUS);
+       if (ret < 0)
+               return ret;
+
+       dev_dbg(&priv->client->dev, "status reg %x\n", ret);
+       conv_done = ret & (STTS751_STATUS_TRIPH | STTS751_STATUS_TRIPL);
+       /*
+        * Reset the cache if the cache time expired, or if we are sure
+        * we have valid data from a device conversion, or if we know
+        * our cache has been never written.
+        *
+        * Note that when the cache has been never written the point is
+        * to correctly initialize the timestamp, rather than clearing
+        * the cache values.
+        *
+        * Note that updating the cache timestamp when we get an alarm flag
+        * is required, otherwise we could incorrectly report alarms to be zero.
+        */
+       if (time_after(jiffies, priv->last_alert_update + cache_time) ||
+           conv_done || !priv->alert_valid) {
+               priv->max_alert = false;
+               priv->min_alert = false;
+               priv->alert_valid = true;
+               priv->last_alert_update = jiffies;
+               dev_dbg(&priv->client->dev, "invalidating alert cache\n");
+       }
+
+       /* OR-accumulate so a flag seen once stays latched for the cache period */
+       priv->max_alert |= !!(ret & STTS751_STATUS_TRIPH);
+       priv->min_alert |= !!(ret & STTS751_STATUS_TRIPL);
+       priv->therm_trip = !!(ret & STTS751_STATUS_TRIPT);
+
+       dev_dbg(&priv->client->dev, "max_alert: %d, min_alert: %d, therm_trip: %d\n",
+               priv->max_alert, priv->min_alert, priv->therm_trip);
+
+       return 0;
+}
+
+/*
+ * SMBus alert handler: refresh the alarm state, log the first occurrence of
+ * each alarm and notify userspace pollers of the alarm sysfs attributes.
+ */
+static void stts751_alert(struct i2c_client *client,
+                         enum i2c_alert_protocol type, unsigned int data)
+{
+       int ret;
+       struct stts751_priv *priv = i2c_get_clientdata(client);
+
+       if (type != I2C_PROTOCOL_SMBUS_ALERT)
+               return;
+
+       dev_dbg(&client->dev, "alert!");
+
+       mutex_lock(&priv->access_lock);
+       ret = stts751_update_alert(priv);
+       if (ret < 0) {
+               /* default to worst case */
+               priv->max_alert = true;
+               priv->min_alert = true;
+
+               dev_warn(priv->dev,
+                        "Alert received, but can't communicate to the device. Triggering all alarms!");
+       }
+
+       if (priv->max_alert) {
+               /* notify_max avoids flooding the log with repeated alerts */
+               if (priv->notify_max)
+                       dev_notice(priv->dev, "got alert for HIGH temperature");
+               priv->notify_max = false;
+
+               /* unblock alert poll */
+               sysfs_notify(&priv->dev->kobj, NULL, "temp1_max_alarm");
+       }
+
+       if (priv->min_alert) {
+               if (priv->notify_min)
+                       dev_notice(priv->dev, "got alert for LOW temperature");
+               priv->notify_min = false;
+
+               /* unblock alert poll */
+               sysfs_notify(&priv->dev->kobj, NULL, "temp1_min_alarm");
+       }
+
+       if (priv->min_alert || priv->max_alert)
+               kobject_uevent(&priv->dev->kobj, KOBJ_CHANGE);
+
+       mutex_unlock(&priv->access_lock);
+}
+
+/*
+ * Refresh the cached temperature and alarm flags if the cache has expired.
+ * Caller must hold ->access_lock. Returns 0 or a negative errno.
+ */
+static int stts751_update(struct stts751_priv *priv)
+{
+       int ret;
+       int cache_time = msecs_to_jiffies(stts751_intervals[priv->interval]);
+
+       if (time_after(jiffies, priv->last_update + cache_time) ||
+           !priv->data_valid) {
+               ret = stts751_update_temp(priv);
+               if (ret)
+                       return ret;
+
+               ret = stts751_update_alert(priv);
+               if (ret)
+                       return ret;
+               priv->data_valid = true;
+               priv->last_update = jiffies;
+       }
+
+       return 0;
+}
+
+/* sysfs read for temp1_max_alarm; also re-arms the "HIGH" alert log message */
+static ssize_t show_max_alarm(struct device *dev, struct device_attribute *attr,
+                             char *buf)
+{
+       int ret;
+       struct stts751_priv *priv = dev_get_drvdata(dev);
+
+       mutex_lock(&priv->access_lock);
+       ret = stts751_update(priv);
+       if (!ret)
+               priv->notify_max = true;
+       mutex_unlock(&priv->access_lock);
+       if (ret < 0)
+               return ret;
+
+       return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->max_alert);
+}
+
+/* sysfs read for temp1_min_alarm; also re-arms the "LOW" alert log message */
+static ssize_t show_min_alarm(struct device *dev, struct device_attribute *attr,
+                             char *buf)
+{
+       int ret;
+       struct stts751_priv *priv = dev_get_drvdata(dev);
+
+       mutex_lock(&priv->access_lock);
+       ret = stts751_update(priv);
+       if (!ret)
+               priv->notify_min = true;
+       mutex_unlock(&priv->access_lock);
+       if (ret < 0)
+               return ret;
+
+       return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->min_alert);
+}
+
+/* sysfs read for temp1_input: current temperature in mC */
+static ssize_t show_input(struct device *dev, struct device_attribute *attr,
+                         char *buf)
+{
+       int ret;
+       struct stts751_priv *priv = dev_get_drvdata(dev);
+
+       mutex_lock(&priv->access_lock);
+       ret = stts751_update(priv);
+       mutex_unlock(&priv->access_lock);
+       if (ret < 0)
+               return ret;
+
+       return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->temp);
+}
+
+/* sysfs read for temp1_crit: cached therm limit in mC */
+static ssize_t show_therm(struct device *dev, struct device_attribute *attr,
+                         char *buf)
+{
+       struct stts751_priv *priv = dev_get_drvdata(dev);
+
+       return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->therm);
+}
+
+/* sysfs write for temp1_crit: program TLIM and update the cached therm/hyst */
+static ssize_t set_therm(struct device *dev, struct device_attribute *attr,
+                        const char *buf, size_t count)
+{
+       int ret;
+       long temp;
+       struct stts751_priv *priv = dev_get_drvdata(dev);
+
+       if (kstrtol(buf, 10, &temp) < 0)
+               return -EINVAL;
+
+       /* HW works in range -64C to +127.937C */
+       temp = clamp_val(temp, -64000, 127937);
+       mutex_lock(&priv->access_lock);
+       ret = stts751_set_temp_reg8(priv, temp, STTS751_REG_TLIM);
+       if (ret)
+               goto exit;
+
+       dev_dbg(&priv->client->dev, "setting therm %ld", temp);
+
+       /*
+        * hysteresis reg is relative to therm, so the HW does not need to be
+        * adjusted, we need to update our local copy only.
+        */
+       priv->hyst = temp - (priv->therm - priv->hyst);
+       priv->therm = temp;
+
+exit:
+       mutex_unlock(&priv->access_lock);
+       if (ret)
+               return ret;
+
+       return count;
+}
+
+/* sysfs read for temp1_crit_hyst: cached hysteresis temperature in mC */
+static ssize_t show_hyst(struct device *dev, struct device_attribute *attr,
+                        char *buf)
+{
+       struct stts751_priv *priv = dev_get_drvdata(dev);
+
+       return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->hyst);
+}
+
+/* sysfs write for temp1_crit_hyst: the HW stores it as an offset from therm */
+static ssize_t set_hyst(struct device *dev, struct device_attribute *attr,
+                       const char *buf, size_t count)
+{
+       int ret;
+       long temp;
+
+       struct stts751_priv *priv = dev_get_drvdata(dev);
+
+       if (kstrtol(buf, 10, &temp) < 0)
+               return -EINVAL;
+
+       mutex_lock(&priv->access_lock);
+       /* HW works in range -64C to +127.937C */
+       temp = clamp_val(temp, -64000, priv->therm);
+       priv->hyst = temp;
+       dev_dbg(&priv->client->dev, "setting hyst %ld", temp);
+       /* convert the absolute value to the relative offset the chip expects */
+       temp = priv->therm - temp;
+       ret = stts751_set_temp_reg8(priv, temp, STTS751_REG_HYST);
+       mutex_unlock(&priv->access_lock);
+       if (ret)
+               return ret;
+
+       return count;
+}
+
+/* sysfs read for temp1_crit_alarm */
+static ssize_t show_therm_trip(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       int ret;
+       struct stts751_priv *priv = dev_get_drvdata(dev);
+
+       mutex_lock(&priv->access_lock);
+       ret = stts751_update(priv);
+       mutex_unlock(&priv->access_lock);
+       if (ret < 0)
+               return ret;
+
+       return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->therm_trip);
+}
+
+/* sysfs read for temp1_max: cached event high limit in mC */
+static ssize_t show_max(struct device *dev, struct device_attribute *attr,
+                       char *buf)
+{
+       struct stts751_priv *priv = dev_get_drvdata(dev);
+
+       return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->event_max);
+}
+
+/* sysfs write for temp1_max: program the event high limit (HLIM) registers */
+static ssize_t set_max(struct device *dev, struct device_attribute *attr,
+                      const char *buf, size_t count)
+{
+       int ret;
+       long temp;
+       struct stts751_priv *priv = dev_get_drvdata(dev);
+
+       if (kstrtol(buf, 10, &temp) < 0)
+               return -EINVAL;
+
+       mutex_lock(&priv->access_lock);
+       /* HW works in range -64C to +127.937C; keep max >= min */
+       temp = clamp_val(temp, priv->event_min, 127937);
+       ret = stts751_set_temp_reg16(priv, temp,
+                                    STTS751_REG_HLIM_H, STTS751_REG_HLIM_L);
+       if (ret)
+               goto exit;
+
+       dev_dbg(&priv->client->dev, "setting event max %ld", temp);
+       priv->event_max = temp;
+       ret = count;
+exit:
+       mutex_unlock(&priv->access_lock);
+       return ret;
+}
+
+/* sysfs read for temp1_min: cached event low limit in mC */
+static ssize_t show_min(struct device *dev, struct device_attribute *attr,
+                       char *buf)
+{
+       struct stts751_priv *priv = dev_get_drvdata(dev);
+
+       return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->event_min);
+}
+
+/* sysfs write for temp1_min: program the event low limit (LLIM) registers */
+static ssize_t set_min(struct device *dev, struct device_attribute *attr,
+                      const char *buf, size_t count)
+{
+       int ret;
+       long temp;
+       struct stts751_priv *priv = dev_get_drvdata(dev);
+
+       if (kstrtol(buf, 10, &temp) < 0)
+               return -EINVAL;
+
+       mutex_lock(&priv->access_lock);
+       /* HW works in range -64C to +127.937C; keep min <= max */
+       temp = clamp_val(temp, -64000, priv->event_max);
+       ret = stts751_set_temp_reg16(priv, temp,
+                                    STTS751_REG_LLIM_H, STTS751_REG_LLIM_L);
+       if (ret)
+               goto exit;
+
+       dev_dbg(&priv->client->dev, "setting event min %ld", temp);
+       priv->event_min = temp;
+       ret = count;
+exit:
+       mutex_unlock(&priv->access_lock);
+       return ret;
+}
+
+/* sysfs read for update_interval: current conversion interval in ms */
+static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
+                            char *buf)
+{
+       struct stts751_priv *priv = dev_get_drvdata(dev);
+
+       return snprintf(buf, PAGE_SIZE - 1, "%d\n",
+                       stts751_intervals[priv->interval]);
+}
+
+/*
+ * sysfs write for update_interval: snap the request to the closest supported
+ * interval, then reprogram rate and resolution in a safe order.
+ */
+static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
+                           const char *buf, size_t count)
+{
+       unsigned long val;
+       int idx;
+       int ret = count;
+       struct stts751_priv *priv = dev_get_drvdata(dev);
+
+       if (kstrtoul(buf, 10, &val) < 0)
+               return -EINVAL;
+
+       idx = find_closest_descending(val, stts751_intervals,
+                                     ARRAY_SIZE(stts751_intervals));
+
+       dev_dbg(&priv->client->dev, "setting interval. req:%lu, idx: %d, val: %d",
+               val, idx, stts751_intervals[idx]);
+
+       mutex_lock(&priv->access_lock);
+       if (priv->interval == idx)
+               goto exit;
+
+       /*
+        * In early development stages I've become suspicious about the chip
+        * starting to misbehave if I ever set, even briefly, an invalid
+        * configuration. While I'm not sure this is really needed, be
+        * conservative and set rate/resolution in such an order that avoids
+        * passing through an invalid configuration.
+        */
+
+       /* speed up: lower the resolution, then modify convrate */
+       if (priv->interval < idx) {
+               dev_dbg(&priv->client->dev, "lower resolution, then modify convrate");
+               priv->interval = idx;
+               ret = stts751_adjust_resolution(priv);
+               if (ret)
+                       goto exit;
+       }
+
+       ret = i2c_smbus_write_byte_data(priv->client, STTS751_REG_RATE, idx);
+       if (ret)
+               goto exit;
+       /* slow down: modify convrate, then raise resolution */
+       if (priv->interval != idx) {
+               dev_dbg(&priv->client->dev, "modify convrate, then raise resolution");
+               priv->interval = idx;
+               ret = stts751_adjust_resolution(priv);
+               if (ret)
+                       goto exit;
+       }
+       ret = count;
+exit:
+       mutex_unlock(&priv->access_lock);
+
+       return ret;
+}
+
+/*
+ * i2c detect callback: identify the chip by manufacturer/product IDs and by
+ * register bits that are always zero on this device family.
+ */
+static int stts751_detect(struct i2c_client *new_client,
+                         struct i2c_board_info *info)
+{
+       struct i2c_adapter *adapter = new_client->adapter;
+       const char *name;
+       int tmp;
+
+       if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+               return -ENODEV;
+
+       tmp = i2c_smbus_read_byte_data(new_client, STTS751_REG_MAN_ID);
+       if (tmp != ST_MAN_ID)
+               return -ENODEV;
+
+       /* lower temperature registers always have bits 0-3 set to zero */
+       tmp = i2c_smbus_read_byte_data(new_client, STTS751_REG_TEMP_L);
+       if (tmp & 0xf)
+               return -ENODEV;
+
+       tmp = i2c_smbus_read_byte_data(new_client, STTS751_REG_HLIM_L);
+       if (tmp & 0xf)
+               return -ENODEV;
+
+       tmp = i2c_smbus_read_byte_data(new_client, STTS751_REG_LLIM_L);
+       if (tmp & 0xf)
+               return -ENODEV;
+
+       /* SMBus timeout register always has bits 0-6 set to zero */
+       tmp = i2c_smbus_read_byte_data(new_client, STTS751_REG_SMBUS_TO);
+       if (tmp & 0x7f)
+               return -ENODEV;
+
+       tmp = i2c_smbus_read_byte_data(new_client, STTS751_REG_PROD_ID);
+
+       switch (tmp) {
+       case STTS751_0_PROD_ID:
+               name = "STTS751-0";
+               break;
+       case STTS751_1_PROD_ID:
+               name = "STTS751-1";
+               break;
+       default:
+               return -ENODEV;
+       }
+       dev_dbg(&new_client->dev, "Chip %s detected", name);
+
+       strlcpy(info->type, stts751_id[0].name, I2C_NAME_SIZE);
+       return 0;
+}
+
+/*
+ * Read back configuration, conversion rate, event limits, therm limit and
+ * hysteresis from the chip into the driver cache.
+ * Returns 0 or a negative errno.
+ */
+static int stts751_read_chip_config(struct stts751_priv *priv)
+{
+       int ret;
+       int tmp;
+
+       ret = i2c_smbus_read_byte_data(priv->client, STTS751_REG_CONF);
+       if (ret < 0)
+               return ret;
+       priv->config = ret;
+       priv->res = (ret & STTS751_CONF_RES_MASK) >> STTS751_CONF_RES_SHIFT;
+
+       ret = i2c_smbus_read_byte_data(priv->client, STTS751_REG_RATE);
+       if (ret < 0)
+               return ret;
+       priv->interval = ret;
+
+       ret = stts751_read_reg16(priv, &priv->event_max,
+                                STTS751_REG_HLIM_H, STTS751_REG_HLIM_L);
+       if (ret)
+               return ret;
+
+       ret = stts751_read_reg16(priv, &priv->event_min,
+                                STTS751_REG_LLIM_H, STTS751_REG_LLIM_L);
+       if (ret)
+               return ret;
+
+       ret = stts751_read_reg8(priv, &priv->therm, STTS751_REG_TLIM);
+       if (ret)
+               return ret;
+
+       /* the HW hysteresis register is an offset relative to therm */
+       ret = stts751_read_reg8(priv, &tmp, STTS751_REG_HYST);
+       if (ret)
+               return ret;
+       priv->hyst = priv->therm - tmp;
+
+       return 0;
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, show_input, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_min, 0644, show_min, set_min, 0);
+static SENSOR_DEVICE_ATTR(temp1_max, 0644, show_max, set_max, 0);
+static SENSOR_DEVICE_ATTR(temp1_min_alarm, 0444, show_min_alarm, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, 0444, show_max_alarm, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_crit, 0644, show_therm,        set_therm, 0);
+static SENSOR_DEVICE_ATTR(temp1_crit_hyst, 0644, show_hyst, set_hyst, 0);
+static SENSOR_DEVICE_ATTR(temp1_crit_alarm, 0444, show_therm_trip, NULL, 0);
+static SENSOR_DEVICE_ATTR(update_interval, 0644,
+                         show_interval, set_interval, 0);
+
+static struct attribute *stts751_attrs[] = {
+       &sensor_dev_attr_temp1_input.dev_attr.attr,
+       &sensor_dev_attr_temp1_min.dev_attr.attr,
+       &sensor_dev_attr_temp1_max.dev_attr.attr,
+       &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
+       &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+       &sensor_dev_attr_temp1_crit.dev_attr.attr,
+       &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
+       &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+       &sensor_dev_attr_update_interval.dev_attr.attr,
+       NULL
+};
+ATTRIBUTE_GROUPS(stts751);
+
+/*
+ * Probe: apply optional SMBus-timeout firmware property, sanity-check the
+ * chip revision, cache the chip configuration, start conversions and
+ * register the hwmon device.
+ */
+static int stts751_probe(struct i2c_client *client,
+                        const struct i2c_device_id *id)
+{
+       struct stts751_priv *priv;
+       int ret;
+       bool smbus_nto;
+       int rev_id;
+
+       priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       priv->client = client;
+       priv->notify_max = true;
+       priv->notify_min = true;
+       i2c_set_clientdata(client, priv);
+       mutex_init(&priv->access_lock);
+
+       if (device_property_present(&client->dev,
+                                   "smbus-timeout-disable")) {
+               smbus_nto = device_property_read_bool(&client->dev,
+                                                     "smbus-timeout-disable");
+
+               /* bit 7 of the timeout register enables the SMBus timeout */
+               ret = i2c_smbus_write_byte_data(client, STTS751_REG_SMBUS_TO,
+                                               smbus_nto ? 0 : 0x80);
+               if (ret)
+                       return ret;
+       }
+
+       rev_id = i2c_smbus_read_byte_data(client, STTS751_REG_REV_ID);
+       if (rev_id < 0)
+               return -ENODEV;
+       if (rev_id != 0x1) {
+               dev_dbg(&client->dev, "Chip revision 0x%x is untested\n",
+                       rev_id);
+       }
+
+       ret = stts751_read_chip_config(priv);
+       if (ret)
+               return ret;
+
+       /* clear STOP and EVENT_DIS: run conversions with event output enabled */
+       priv->config &= ~(STTS751_CONF_STOP | STTS751_CONF_EVENT_DIS);
+       ret = i2c_smbus_write_byte_data(client, STTS751_REG_CONF, priv->config);
+       if (ret)
+               return ret;
+
+       priv->dev = devm_hwmon_device_register_with_groups(&client->dev,
+                                                       client->name, priv,
+                                                       stts751_groups);
+       return PTR_ERR_OR_ZERO(priv->dev);
+}
+
+MODULE_DEVICE_TABLE(i2c, stts751_id);
+
+static struct i2c_driver stts751_driver = {
+       .class          = I2C_CLASS_HWMON,
+       .driver = {
+               .name   = DEVNAME,
+       },
+       .probe          = stts751_probe,
+       .id_table       = stts751_id,
+       .detect         = stts751_detect,
+       .alert          = stts751_alert,
+       .address_list   = normal_i2c,
+};
+
+module_i2c_driver(stts751_driver);
+
+MODULE_AUTHOR("Andrea Merello <andrea.merello@gmail.com>");
+MODULE_DESCRIPTION("STTS751 sensor driver");
+MODULE_LICENSE("GPL");
index eeeed2c7d0816c481df22588ec0cdcf709426cd6..1f2d13dc94396f87a779e665eb6a51f1aee656fe 100644 (file)
@@ -82,16 +82,6 @@ static const u8 TMP401_TEMP_MSB_WRITE[7][2] = {
        { 0, 0x11 },    /* offset */
 };
 
-static const u8 TMP401_TEMP_LSB[7][2] = {
-       { 0x15, 0x10 }, /* temp */
-       { 0x17, 0x14 }, /* low limit */
-       { 0x16, 0x13 }, /* high limit */
-       { 0, 0 },       /* therm (crit) limit (unused) */
-       { 0x31, 0x35 }, /* lowest */
-       { 0x33, 0x37 }, /* highest */
-       { 0, 0x12 },    /* offset */
-};
-
 static const u8 TMP432_TEMP_MSB_READ[4][3] = {
        { 0x00, 0x01, 0x23 },   /* temp */
        { 0x06, 0x08, 0x16 },   /* low limit */
@@ -106,12 +96,6 @@ static const u8 TMP432_TEMP_MSB_WRITE[4][3] = {
        { 0x20, 0x19, 0x1A },   /* therm (crit) limit */
 };
 
-static const u8 TMP432_TEMP_LSB[3][3] = {
-       { 0x29, 0x10, 0x24 },   /* temp */
-       { 0x3E, 0x14, 0x18 },   /* low limit */
-       { 0x3D, 0x13, 0x17 },   /* high limit */
-};
-
 /* [0] = fault, [1] = low, [2] = high, [3] = therm/crit */
 static const u8 TMP432_STATUS_REG[] = {
        0x1b, 0x36, 0x35, 0x37 };
@@ -213,25 +197,20 @@ static int tmp401_update_device_reg16(struct i2c_client *client,
        for (i = 0; i < num_sensors; i++) {             /* local / r1 / r2 */
                for (j = 0; j < num_regs; j++) {        /* temp / low / ... */
                        u8 regaddr;
-                       /*
-                        * High byte must be read first immediately followed
-                        * by the low byte
-                        */
+
                        regaddr = data->kind == tmp432 ?
                                                TMP432_TEMP_MSB_READ[j][i] :
                                                TMP401_TEMP_MSB_READ[j][i];
-                       val = i2c_smbus_read_byte_data(client, regaddr);
-                       if (val < 0)
-                               return val;
-                       data->temp[j][i] = val << 8;
-                       if (j == 3)             /* crit is msb only */
-                               continue;
-                       regaddr = data->kind == tmp432 ? TMP432_TEMP_LSB[j][i]
-                                                      : TMP401_TEMP_LSB[j][i];
-                       val = i2c_smbus_read_byte_data(client, regaddr);
+                       if (j == 3) { /* crit is msb only */
+                               val = i2c_smbus_read_byte_data(client, regaddr);
+                       } else {
+                               val = i2c_smbus_read_word_swapped(client,
+                                                                 regaddr);
+                       }
                        if (val < 0)
                                return val;
-                       data->temp[j][i] |= val;
+
+                       data->temp[j][i] = j == 3 ? val << 8 : val;
                }
        }
        return 0;
@@ -373,11 +352,11 @@ static ssize_t store_temp(struct device *dev, struct device_attribute *devattr,
 
        regaddr = data->kind == tmp432 ? TMP432_TEMP_MSB_WRITE[nr][index]
                                       : TMP401_TEMP_MSB_WRITE[nr][index];
-       i2c_smbus_write_byte_data(client, regaddr, reg >> 8);
-       if (nr != 3) {
-               regaddr = data->kind == tmp432 ? TMP432_TEMP_LSB[nr][index]
-                                              : TMP401_TEMP_LSB[nr][index];
-               i2c_smbus_write_byte_data(client, regaddr, reg & 0xFF);
+       if (nr == 3) { /* crit is msb only */
+               i2c_smbus_write_byte_data(client, regaddr, reg >> 8);
+       } else {
+               /* Hardware expects big endian data --> use _swapped */
+               i2c_smbus_write_word_swapped(client, regaddr, reg);
        }
        data->temp[nr][index] = reg;
 
@@ -449,7 +428,7 @@ static ssize_t reset_temp_history(struct device *dev,
        return count;
 }
 
-static ssize_t show_update_interval(struct device *dev,
+static ssize_t update_interval_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
 {
        struct tmp401_data *data = dev_get_drvdata(dev);
@@ -457,9 +436,9 @@ static ssize_t show_update_interval(struct device *dev,
        return sprintf(buf, "%u\n", data->update_interval);
 }
 
-static ssize_t set_update_interval(struct device *dev,
-                                  struct device_attribute *attr,
-                                  const char *buf, size_t count)
+static ssize_t update_interval_store(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf, size_t count)
 {
        struct tmp401_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
@@ -521,8 +500,7 @@ static SENSOR_DEVICE_ATTR_2(temp2_max_alarm, S_IRUGO, show_status, NULL,
 static SENSOR_DEVICE_ATTR_2(temp2_crit_alarm, S_IRUGO, show_status, NULL,
                            3, TMP432_STATUS_REMOTE1);
 
-static DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR, show_update_interval,
-                  set_update_interval);
+static DEVICE_ATTR_RW(update_interval);
 
 static struct attribute *tmp401_attributes[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
index d1f209a5feacba257c979efb763c5de1c7884a01..07a0cb0a1f284fe62de02f84d43c3f30589c3f0a 100644 (file)
@@ -88,8 +88,8 @@ static ssize_t show_temp(struct device *dev,
        return sprintf(buf, "%lu\n", ((unsigned long)eax & 0xffffff) * 1000);
 }
 
-static ssize_t show_cpu_vid(struct device *dev,
-                           struct device_attribute *devattr, char *buf)
+static ssize_t cpu0_vid_show(struct device *dev,
+                            struct device_attribute *devattr, char *buf)
 {
        struct via_cputemp_data *data = dev_get_drvdata(dev);
        u32 eax, edx;
@@ -119,7 +119,7 @@ static const struct attribute_group via_cputemp_group = {
 };
 
 /* Optional attributes */
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_cpu_vid, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
 
 static int via_cputemp_probe(struct platform_device *pdev)
 {
index 40dd93c8f9f457483ca8be2b0c2388943315c260..81f35e3a06b8439f455f01cc50daf79960d943f5 100644 (file)
@@ -580,14 +580,14 @@ show_fan_offset(1);
 show_fan_offset(2);
 
 /* Alarms */
-static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
+static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
 {
        struct via686a_data *data = via686a_update_device(dev);
        return sprintf(buf, "%u\n", data->alarms);
 }
 
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
 
 static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
                          char *buf)
@@ -607,13 +607,13 @@ static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 15);
 static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
 static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
 
-static ssize_t show_name(struct device *dev, struct device_attribute
+static ssize_t name_show(struct device *dev, struct device_attribute
                         *devattr, char *buf)
 {
        struct via686a_data *data = dev_get_drvdata(dev);
        return sprintf(buf, "%s\n", data->name);
 }
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
 
 static struct attribute *via686a_attributes[] = {
        &sensor_dev_attr_in0_input.dev_attr.attr,
index cb69a8c2ed5b84ba77d0053355ab2a649837e5f7..367b5eb53fb6aa9cf8decc67d3ce637cccd26c5c 100644 (file)
@@ -263,8 +263,8 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
 }
 
 /* Special case for input 5 as this has 3.3V scaling built into the chip */
-static ssize_t show_in5(struct device *dev, struct device_attribute *attr,
-               char *buf)
+static ssize_t in5_input_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
 {
        struct vt8231_data *data = vt8231_update_device(dev);
 
@@ -272,7 +272,7 @@ static ssize_t show_in5(struct device *dev, struct device_attribute *attr,
                (((data->in[5] - 3) * 10000 * 54) / (958 * 34)));
 }
 
-static ssize_t show_in5_min(struct device *dev, struct device_attribute *attr,
+static ssize_t in5_min_show(struct device *dev, struct device_attribute *attr,
                char *buf)
 {
        struct vt8231_data *data = vt8231_update_device(dev);
@@ -281,7 +281,7 @@ static ssize_t show_in5_min(struct device *dev, struct device_attribute *attr,
                (((data->in_min[5] - 3) * 10000 * 54) / (958 * 34)));
 }
 
-static ssize_t show_in5_max(struct device *dev, struct device_attribute *attr,
+static ssize_t in5_max_show(struct device *dev, struct device_attribute *attr,
                char *buf)
 {
        struct vt8231_data *data = vt8231_update_device(dev);
@@ -290,8 +290,9 @@ static ssize_t show_in5_max(struct device *dev, struct device_attribute *attr,
                (((data->in_max[5] - 3) * 10000 * 54) / (958 * 34)));
 }
 
-static ssize_t set_in5_min(struct device *dev, struct device_attribute *attr,
-               const char *buf, size_t count)
+static ssize_t in5_min_store(struct device *dev,
+                            struct device_attribute *attr, const char *buf,
+                            size_t count)
 {
        struct vt8231_data *data = dev_get_drvdata(dev);
        unsigned long val;
@@ -309,8 +310,9 @@ static ssize_t set_in5_min(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static ssize_t set_in5_max(struct device *dev, struct device_attribute *attr,
-               const char *buf, size_t count)
+static ssize_t in5_max_store(struct device *dev,
+                            struct device_attribute *attr, const char *buf,
+                            size_t count)
 {
        struct vt8231_data *data = dev_get_drvdata(dev);
        unsigned long val;
@@ -342,34 +344,35 @@ define_voltage_sysfs(2);
 define_voltage_sysfs(3);
 define_voltage_sysfs(4);
 
-static DEVICE_ATTR(in5_input, S_IRUGO, show_in5, NULL);
-static DEVICE_ATTR(in5_min, S_IRUGO | S_IWUSR, show_in5_min, set_in5_min);
-static DEVICE_ATTR(in5_max, S_IRUGO | S_IWUSR, show_in5_max, set_in5_max);
+static DEVICE_ATTR_RO(in5_input);
+static DEVICE_ATTR_RW(in5_min);
+static DEVICE_ATTR_RW(in5_max);
 
 /* Temperatures */
-static ssize_t show_temp0(struct device *dev, struct device_attribute *attr,
-               char *buf)
+static ssize_t temp1_input_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
 {
        struct vt8231_data *data = vt8231_update_device(dev);
        return sprintf(buf, "%d\n", data->temp[0] * 250);
 }
 
-static ssize_t show_temp0_max(struct device *dev, struct device_attribute *attr,
+static ssize_t temp1_max_show(struct device *dev, struct device_attribute *attr,
                char *buf)
 {
        struct vt8231_data *data = vt8231_update_device(dev);
        return sprintf(buf, "%d\n", data->temp_max[0] * 1000);
 }
 
-static ssize_t show_temp0_min(struct device *dev, struct device_attribute *attr,
-               char *buf)
+static ssize_t temp1_max_hyst_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
 {
        struct vt8231_data *data = vt8231_update_device(dev);
        return sprintf(buf, "%d\n", data->temp_min[0] * 1000);
 }
 
-static ssize_t set_temp0_max(struct device *dev, struct device_attribute *attr,
-               const char *buf, size_t count)
+static ssize_t temp1_max_store(struct device *dev,
+                              struct device_attribute *attr, const char *buf,
+                              size_t count)
 {
        struct vt8231_data *data = dev_get_drvdata(dev);
        long val;
@@ -385,8 +388,9 @@ static ssize_t set_temp0_max(struct device *dev, struct device_attribute *attr,
        mutex_unlock(&data->update_lock);
        return count;
 }
-static ssize_t set_temp0_min(struct device *dev, struct device_attribute *attr,
-               const char *buf, size_t count)
+static ssize_t temp1_max_hyst_store(struct device *dev,
+                                   struct device_attribute *attr,
+                                   const char *buf, size_t count)
 {
        struct vt8231_data *data = dev_get_drvdata(dev);
        long val;
@@ -481,10 +485,9 @@ static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR,   \
 static SENSOR_DEVICE_ATTR(temp##offset##_max_hyst, S_IRUGO | S_IWUSR,  \
                show_temp_min, set_temp_min, offset - 1)
 
-static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp0, NULL);
-static DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp0_max, set_temp0_max);
-static DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp0_min,
-                  set_temp0_min);
+static DEVICE_ATTR_RO(temp1_input);
+static DEVICE_ATTR_RW(temp1_max);
+static DEVICE_ATTR_RW(temp1_max_hyst);
 
 define_temperature_sysfs(2);
 define_temperature_sysfs(3);
@@ -603,13 +606,13 @@ define_fan_sysfs(1);
 define_fan_sysfs(2);
 
 /* Alarms */
-static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
+static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
 {
        struct vt8231_data *data = vt8231_update_device(dev);
        return sprintf(buf, "%d\n", data->alarms);
 }
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
 
 static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
                          char *buf)
@@ -633,13 +636,13 @@ static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 2);
 static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
 static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
 
-static ssize_t show_name(struct device *dev, struct device_attribute
+static ssize_t name_show(struct device *dev, struct device_attribute
                         *devattr, char *buf)
 {
        struct vt8231_data *data = dev_get_drvdata(dev);
        return sprintf(buf, "%s\n", data->name);
 }
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
 
 static struct attribute *vt8231_attributes_temps[6][5] = {
        {
index 697007afb99c2221e931da3617704dd0da3a1033..ab346ed142debaa7f51fb73d3addad96aa89d01b 100644 (file)
@@ -1687,14 +1687,14 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
 
 fan_time_functions(fan_stop_time, FAN_STOP_TIME)
 
-static ssize_t show_name(struct device *dev, struct device_attribute *attr,
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
 {
        struct w83627ehf_data *data = dev_get_drvdata(dev);
 
        return sprintf(buf, "%s\n", data->name);
 }
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
 
 static struct sensor_device_attribute sda_sf3_arrays_fan4[] = {
        SENSOR_ATTR(pwm4_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
@@ -1754,12 +1754,12 @@ static struct sensor_device_attribute sda_sf3_max_step_arrays[] = {
 };
 
 static ssize_t
-show_vid(struct device *dev, struct device_attribute *attr, char *buf)
+cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct w83627ehf_data *data = dev_get_drvdata(dev);
        return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
 }
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
 
 
 /* Case open detection */
index 721295b9a05176704b156e7dd799ad17da5edd86..8ac89d0781ccc68336eff6f13270063ed69a3ee9 100644 (file)
@@ -575,26 +575,30 @@ static ssize_t show_in_0(struct w83627hf_data *data, char *buf, u8 reg)
        return sprintf(buf,"%ld\n", in0);
 }
 
-static ssize_t show_regs_in_0(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t in0_input_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
 {
        struct w83627hf_data *data = w83627hf_update_device(dev);
        return show_in_0(data, buf, data->in[0]);
 }
 
-static ssize_t show_regs_in_min0(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t in0_min_show(struct device *dev, struct device_attribute *attr,
+                           char *buf)
 {
        struct w83627hf_data *data = w83627hf_update_device(dev);
        return show_in_0(data, buf, data->in_min[0]);
 }
 
-static ssize_t show_regs_in_max0(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t in0_max_show(struct device *dev, struct device_attribute *attr,
+                           char *buf)
 {
        struct w83627hf_data *data = w83627hf_update_device(dev);
        return show_in_0(data, buf, data->in_max[0]);
 }
 
-static ssize_t store_regs_in_min0(struct device *dev, struct device_attribute *attr,
-       const char *buf, size_t count)
+static ssize_t in0_min_store(struct device *dev,
+                            struct device_attribute *attr, const char *buf,
+                            size_t count)
 {
        struct w83627hf_data *data = dev_get_drvdata(dev);
        unsigned long val;
@@ -622,8 +626,9 @@ static ssize_t store_regs_in_min0(struct device *dev, struct device_attribute *a
        return count;
 }
 
-static ssize_t store_regs_in_max0(struct device *dev, struct device_attribute *attr,
-       const char *buf, size_t count)
+static ssize_t in0_max_store(struct device *dev,
+                            struct device_attribute *attr, const char *buf,
+                            size_t count)
 {
        struct w83627hf_data *data = dev_get_drvdata(dev);
        unsigned long val;
@@ -651,11 +656,9 @@ static ssize_t store_regs_in_max0(struct device *dev, struct device_attribute *a
        return count;
 }
 
-static DEVICE_ATTR(in0_input, S_IRUGO, show_regs_in_0, NULL);
-static DEVICE_ATTR(in0_min, S_IRUGO | S_IWUSR,
-       show_regs_in_min0, store_regs_in_min0);
-static DEVICE_ATTR(in0_max, S_IRUGO | S_IWUSR,
-       show_regs_in_max0, store_regs_in_max0);
+static DEVICE_ATTR_RO(in0_input);
+static DEVICE_ATTR_RW(in0_min);
+static DEVICE_ATTR_RW(in0_max);
 
 static ssize_t
 show_fan_input(struct device *dev, struct device_attribute *devattr, char *buf)
@@ -796,21 +799,22 @@ sysfs_temp_decl(2);
 sysfs_temp_decl(3);
 
 static ssize_t
-show_vid_reg(struct device *dev, struct device_attribute *attr, char *buf)
+cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct w83627hf_data *data = w83627hf_update_device(dev);
        return sprintf(buf, "%ld\n", (long) vid_from_reg(data->vid, data->vrm));
 }
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid_reg, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
 
 static ssize_t
-show_vrm_reg(struct device *dev, struct device_attribute *attr, char *buf)
+vrm_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct w83627hf_data *data = dev_get_drvdata(dev);
        return sprintf(buf, "%ld\n", (long) data->vrm);
 }
 static ssize_t
-store_vrm_reg(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+vrm_store(struct device *dev, struct device_attribute *attr, const char *buf,
+         size_t count)
 {
        struct w83627hf_data *data = dev_get_drvdata(dev);
        unsigned long val;
@@ -826,15 +830,15 @@ store_vrm_reg(struct device *dev, struct device_attribute *attr, const char *buf
 
        return count;
 }
-static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm_reg, store_vrm_reg);
+static DEVICE_ATTR_RW(vrm);
 
 static ssize_t
-show_alarms_reg(struct device *dev, struct device_attribute *attr, char *buf)
+alarms_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct w83627hf_data *data = w83627hf_update_device(dev);
        return sprintf(buf, "%ld\n", (long) data->alarms);
 }
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms_reg, NULL);
+static DEVICE_ATTR_RO(alarms);
 
 static ssize_t
 show_alarm(struct device *dev, struct device_attribute *attr, char *buf)
@@ -860,7 +864,7 @@ static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5);
 static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13);
 
 static ssize_t
-show_beep_mask(struct device *dev, struct device_attribute *attr, char *buf)
+beep_mask_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct w83627hf_data *data = w83627hf_update_device(dev);
        return sprintf(buf, "%ld\n",
@@ -868,7 +872,7 @@ show_beep_mask(struct device *dev, struct device_attribute *attr, char *buf)
 }
 
 static ssize_t
-store_beep_mask(struct device *dev, struct device_attribute *attr,
+beep_mask_store(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
 {
        struct w83627hf_data *data = dev_get_drvdata(dev);
@@ -895,8 +899,7 @@ store_beep_mask(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static DEVICE_ATTR(beep_mask, S_IRUGO | S_IWUSR,
-                  show_beep_mask, store_beep_mask);
+static DEVICE_ATTR_RW(beep_mask);
 
 static ssize_t
 show_beep(struct device *dev, struct device_attribute *attr, char *buf)
@@ -1264,13 +1267,13 @@ sysfs_temp_type(2);
 sysfs_temp_type(3);
 
 static ssize_t
-show_name(struct device *dev, struct device_attribute *devattr, char *buf)
+name_show(struct device *dev, struct device_attribute *devattr, char *buf)
 {
        struct w83627hf_data *data = dev_get_drvdata(dev);
 
        return sprintf(buf, "%s\n", data->name);
 }
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
 
 static int __init w83627hf_find(int sioaddr, unsigned short *addr,
                                struct w83627hf_sio_data *sio_data)
index 54848fdd181e2212c302355e99f0920464c98c29..246fb2365126b5b013c2df0b09ef54841e91a4a9 100644 (file)
@@ -416,24 +416,24 @@ sysfs_temp_offsets(2);
 sysfs_temp_offsets(3);
 
 static ssize_t
-show_vid_reg(struct device *dev, struct device_attribute *attr, char *buf)
+cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct w83781d_data *data = w83781d_update_device(dev);
        return sprintf(buf, "%ld\n", (long) vid_from_reg(data->vid, data->vrm));
 }
 
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid_reg, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
 
 static ssize_t
-show_vrm_reg(struct device *dev, struct device_attribute *attr, char *buf)
+vrm_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct w83781d_data *data = dev_get_drvdata(dev);
        return sprintf(buf, "%ld\n", (long) data->vrm);
 }
 
 static ssize_t
-store_vrm_reg(struct device *dev, struct device_attribute *attr,
-             const char *buf, size_t count)
+vrm_store(struct device *dev, struct device_attribute *attr, const char *buf,
+         size_t count)
 {
        struct w83781d_data *data = dev_get_drvdata(dev);
        unsigned long val;
@@ -447,16 +447,16 @@ store_vrm_reg(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm_reg, store_vrm_reg);
+static DEVICE_ATTR_RW(vrm);
 
 static ssize_t
-show_alarms_reg(struct device *dev, struct device_attribute *attr, char *buf)
+alarms_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct w83781d_data *data = w83781d_update_device(dev);
        return sprintf(buf, "%u\n", data->alarms);
 }
 
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms_reg, NULL);
+static DEVICE_ATTR_RO(alarms);
 
 static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
                char *buf)
@@ -491,7 +491,7 @@ static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4);
 static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5);
 static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_temp3_alarm, NULL, 0);
 
-static ssize_t show_beep_mask(struct device *dev,
+static ssize_t beep_mask_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
 {
        struct w83781d_data *data = w83781d_update_device(dev);
@@ -500,7 +500,7 @@ static ssize_t show_beep_mask(struct device *dev,
 }
 
 static ssize_t
-store_beep_mask(struct device *dev, struct device_attribute *attr,
+beep_mask_store(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
 {
        struct w83781d_data *data = dev_get_drvdata(dev);
@@ -527,8 +527,7 @@ store_beep_mask(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static DEVICE_ATTR(beep_mask, S_IRUGO | S_IWUSR,
-               show_beep_mask, store_beep_mask);
+static DEVICE_ATTR_RW(beep_mask);
 
 static ssize_t show_beep(struct device *dev, struct device_attribute *attr,
                char *buf)
@@ -708,7 +707,7 @@ show_pwm(struct device *dev, struct device_attribute *da, char *buf)
 }
 
 static ssize_t
-show_pwm2_enable(struct device *dev, struct device_attribute *da, char *buf)
+pwm2_enable_show(struct device *dev, struct device_attribute *da, char *buf)
 {
        struct w83781d_data *data = w83781d_update_device(dev);
        return sprintf(buf, "%d\n", (int)data->pwm2_enable);
@@ -736,7 +735,7 @@ store_pwm(struct device *dev, struct device_attribute *da, const char *buf,
 }
 
 static ssize_t
-store_pwm2_enable(struct device *dev, struct device_attribute *da,
+pwm2_enable_store(struct device *dev, struct device_attribute *da,
                const char *buf, size_t count)
 {
        struct w83781d_data *data = dev_get_drvdata(dev);
@@ -778,8 +777,7 @@ static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, show_pwm, store_pwm, 1);
 static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR, show_pwm, store_pwm, 2);
 static SENSOR_DEVICE_ATTR(pwm4, S_IRUGO | S_IWUSR, show_pwm, store_pwm, 3);
 /* only PWM2 can be enabled/disabled */
-static DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR,
-               show_pwm2_enable, store_pwm2_enable);
+static DEVICE_ATTR_RW(pwm2_enable);
 
 static ssize_t
 show_sensor(struct device *dev, struct device_attribute *da, char *buf)
@@ -1616,12 +1614,12 @@ static unsigned short isa_address = 0x290;
  * we must create it by ourselves.
  */
 static ssize_t
-show_name(struct device *dev, struct device_attribute *devattr, char *buf)
+name_show(struct device *dev, struct device_attribute *devattr, char *buf)
 {
        struct w83781d_data *data = dev_get_drvdata(dev);
        return sprintf(buf, "%s\n", data->name);
 }
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
 
 static struct w83781d_data *w83781d_data_if_isa(void)
 {
index 001df856913feba93790bf9383b2023115defad3..8af6081b4ab418c16008ac8c958a2aee49b45f8e 100644 (file)
@@ -1041,14 +1041,14 @@ static struct sensor_device_attribute sda_temp_alarm[] = {
 };
 
 /* get realtime status of all sensors items: voltage, temp, fan */
-static ssize_t show_alarms_reg(struct device *dev,
-                               struct device_attribute *attr, char *buf)
+static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
+                          char *buf)
 {
        struct w83791d_data *data = w83791d_update_device(dev);
        return sprintf(buf, "%u\n", data->alarms);
 }
 
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms_reg, NULL);
+static DEVICE_ATTR_RO(alarms);
 
 /* Beep control */
 
@@ -1147,25 +1147,24 @@ static struct sensor_device_attribute sda_beep_ctrl[] = {
 };
 
 /* cpu voltage regulation information */
-static ssize_t show_vid_reg(struct device *dev,
-                               struct device_attribute *attr, char *buf)
+static ssize_t cpu0_vid_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
 {
        struct w83791d_data *data = w83791d_update_device(dev);
        return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
 }
 
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid_reg, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
 
-static ssize_t show_vrm_reg(struct device *dev,
-                               struct device_attribute *attr, char *buf)
+static ssize_t vrm_show(struct device *dev, struct device_attribute *attr,
+                       char *buf)
 {
        struct w83791d_data *data = dev_get_drvdata(dev);
        return sprintf(buf, "%d\n", data->vrm);
 }
 
-static ssize_t store_vrm_reg(struct device *dev,
-                               struct device_attribute *attr,
-                               const char *buf, size_t count)
+static ssize_t vrm_store(struct device *dev, struct device_attribute *attr,
+                        const char *buf, size_t count)
 {
        struct w83791d_data *data = dev_get_drvdata(dev);
        unsigned long val;
@@ -1188,7 +1187,7 @@ static ssize_t store_vrm_reg(struct device *dev,
        return count;
 }
 
-static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm_reg, store_vrm_reg);
+static DEVICE_ATTR_RW(vrm);
 
 #define IN_UNIT_ATTRS(X) \
        &sda_in_input[X].dev_attr.attr, \
index 0a8bce726b4b037797126ea8b8b5cf8d229bb253..d764602d70dbef20b251ac0e74281afd934e52ec 100644 (file)
@@ -578,7 +578,7 @@ static ssize_t store_temp23(struct device *dev, struct device_attribute *attr,
 
 /* get realtime status of all sensors items: voltage, temp, fan */
 static ssize_t
-show_alarms_reg(struct device *dev, struct device_attribute *attr, char *buf)
+alarms_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct w83792d_data *data = w83792d_update_device(dev);
        return sprintf(buf, "%d\n", data->alarms);
@@ -735,16 +735,16 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
 }
 
 static ssize_t
-show_chassis_clear(struct device *dev, struct device_attribute *attr,
-                       char *buf)
+intrusion0_alarm_show(struct device *dev, struct device_attribute *attr,
+                     char *buf)
 {
        struct w83792d_data *data = w83792d_update_device(dev);
        return sprintf(buf, "%d\n", data->chassis);
 }
 
 static ssize_t
-store_chassis_clear(struct device *dev, struct device_attribute *attr,
-                       const char *buf, size_t count)
+intrusion0_alarm_store(struct device *dev, struct device_attribute *attr,
+                      const char *buf, size_t count)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct w83792d_data *data = i2c_get_clientdata(client);
@@ -1047,7 +1047,7 @@ static SENSOR_DEVICE_ATTR_2(temp2_max_hyst, S_IRUGO | S_IWUSR,
                        show_temp23, store_temp23, 0, 4);
 static SENSOR_DEVICE_ATTR_2(temp3_max_hyst, S_IRUGO | S_IWUSR,
                        show_temp23, store_temp23, 1, 4);
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms_reg, NULL);
+static DEVICE_ATTR_RO(alarms);
 static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
 static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
 static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 2);
@@ -1067,8 +1067,7 @@ static SENSOR_DEVICE_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 20);
 static SENSOR_DEVICE_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL, 21);
 static SENSOR_DEVICE_ATTR(fan5_alarm, S_IRUGO, show_alarm, NULL, 22);
 static SENSOR_DEVICE_ATTR(fan6_alarm, S_IRUGO, show_alarm, NULL, 23);
-static DEVICE_ATTR(intrusion0_alarm, S_IRUGO | S_IWUSR,
-                       show_chassis_clear, store_chassis_clear);
+static DEVICE_ATTR_RW(intrusion0_alarm);
 static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0);
 static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1);
 static SENSOR_DEVICE_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 2);
index 816aa6caf5d553ef2538eb9ebdcddab40f96c588..dab5c515d5a3d141449645717edbd35688b13fbb 100644 (file)
@@ -324,7 +324,7 @@ static struct i2c_driver w83793_driver = {
 };
 
 static ssize_t
-show_vrm(struct device *dev, struct device_attribute *attr, char *buf)
+vrm_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct w83793_data *data = dev_get_drvdata(dev);
        return sprintf(buf, "%d\n", data->vrm);
@@ -342,7 +342,7 @@ show_vid(struct device *dev, struct device_attribute *attr, char *buf)
 }
 
 static ssize_t
-store_vrm(struct device *dev, struct device_attribute *attr,
+vrm_store(struct device *dev, struct device_attribute *attr,
          const char *buf, size_t count)
 {
        struct w83793_data *data = dev_get_drvdata(dev);
@@ -1169,7 +1169,7 @@ static struct sensor_device_attribute_2 w83793_vid[] = {
        SENSOR_ATTR_2(cpu0_vid, S_IRUGO, show_vid, NULL, NOT_USED, 0),
        SENSOR_ATTR_2(cpu1_vid, S_IRUGO, show_vid, NULL, NOT_USED, 1),
 };
-static DEVICE_ATTR(vrm, S_IWUSR | S_IRUGO, show_vrm, store_vrm);
+static DEVICE_ATTR_RW(vrm);
 
 static struct sensor_device_attribute_2 sda_single_files[] = {
        SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_alarm_beep,
index 686971263bef19a2fcc159432a6e6e46b1e16771..45d6771fac8ce1efcd093a710c6c3fad64ca33b8 100644 (file)
@@ -962,10 +962,6 @@ static int cdns_i2c_probe(struct platform_device *pdev)
                goto err_clk_dis;
        }
 
-       ret = i2c_add_adapter(&id->adap);
-       if (ret < 0)
-               goto err_clk_dis;
-
        /*
         * Cadence I2C controller has a bug wherein it generates
         * invalid read transaction after HW timeout in master receiver mode.
@@ -975,6 +971,10 @@ static int cdns_i2c_probe(struct platform_device *pdev)
         */
        cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);
 
+       ret = i2c_add_adapter(&id->adap);
+       if (ret < 0)
+               goto err_clk_dis;
+
        dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n",
                 id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq);
 
index 6d81c56184d33d4757414fd748ace9013c8a3b14..e9db857c62268c61099cb34a91c8f7ccf3e5284a 100644 (file)
@@ -475,30 +475,28 @@ static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev)
 static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
 {
        struct i2c_msg *msgs = dev->msgs;
-       u32 ic_tar = 0;
+       u32 ic_con, ic_tar = 0;
 
        /* Disable the adapter */
        __i2c_dw_enable_and_wait(dev, false);
 
        /* if the slave address is ten bit address, enable 10BITADDR */
-       if (dev->dynamic_tar_update_enabled) {
+       ic_con = dw_readl(dev, DW_IC_CON);
+       if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
+               ic_con |= DW_IC_CON_10BITADDR_MASTER;
                /*
                 * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
-                * mode has to be enabled via bit 12 of IC_TAR register,
-                * otherwise bit 4 of IC_CON is used.
+                * mode has to be enabled via bit 12 of IC_TAR register.
+                * We set it always as I2C_DYNAMIC_TAR_UPDATE can't be
+                * detected from registers.
                 */
-               if (msgs[dev->msg_write_idx].flags & I2C_M_TEN)
-                       ic_tar = DW_IC_TAR_10BITADDR_MASTER;
+               ic_tar = DW_IC_TAR_10BITADDR_MASTER;
        } else {
-               u32 ic_con = dw_readl(dev, DW_IC_CON);
-
-               if (msgs[dev->msg_write_idx].flags & I2C_M_TEN)
-                       ic_con |= DW_IC_CON_10BITADDR_MASTER;
-               else
-                       ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
-               dw_writel(dev, ic_con, DW_IC_CON);
+               ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
        }
 
+       dw_writel(dev, ic_con, DW_IC_CON);
+
        /*
         * Set the slave (target) address and enable 10-bit addressing mode
         * if applicable.
@@ -963,7 +961,6 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
 {
        struct i2c_adapter *adap = &dev->adapter;
        int r;
-       u32 reg;
 
        init_completion(&dev->cmd_complete);
 
@@ -971,26 +968,6 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
        if (r)
                return r;
 
-       r = i2c_dw_acquire_lock(dev);
-       if (r)
-               return r;
-
-       /*
-        * Test if dynamic TAR update is enabled in this controller by writing
-        * to IC_10BITADDR_MASTER field in IC_CON: when it is enabled this
-        * field is read-only so it should not succeed
-        */
-       reg = dw_readl(dev, DW_IC_CON);
-       dw_writel(dev, reg ^ DW_IC_CON_10BITADDR_MASTER, DW_IC_CON);
-
-       if ((dw_readl(dev, DW_IC_CON) & DW_IC_CON_10BITADDR_MASTER) ==
-           (reg & DW_IC_CON_10BITADDR_MASTER)) {
-               dev->dynamic_tar_update_enabled = true;
-               dev_dbg(dev->dev, "Dynamic TAR update enabled");
-       }
-
-       i2c_dw_release_lock(dev);
-
        snprintf(adap->name, sizeof(adap->name),
                 "Synopsys DesignWare I2C adapter");
        adap->retries = 3;
index 26250b425e2f1046fa0b0483f3e59fb03ed33f75..c1db3a5a340f599b6bee5c0165112703e2e082fc 100644 (file)
@@ -125,7 +125,6 @@ struct dw_i2c_dev {
        int                     (*acquire_lock)(struct dw_i2c_dev *dev);
        void                    (*release_lock)(struct dw_i2c_dev *dev);
        bool                    pm_runtime_disabled;
-       bool                    dynamic_tar_update_enabled;
 };
 
 #define ACCESS_SWAP            0x00000001
index c62b7cd475f87694bd9eeacba60ae2b75733ff79..3310f2e0dbd3b4e8ef2fb0386530c11deba4192f 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
@@ -636,12 +637,31 @@ static int lpi2c_imx_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int lpi2c_imx_suspend(struct device *dev)
+{
+       pinctrl_pm_select_sleep_state(dev);
+
+       return 0;
+}
+
+static int lpi2c_imx_resume(struct device *dev)
+{
+       pinctrl_pm_select_default_state(dev);
+
+       return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(imx_lpi2c_pm, lpi2c_imx_suspend, lpi2c_imx_resume);
+
 static struct platform_driver lpi2c_imx_driver = {
        .probe = lpi2c_imx_probe,
        .remove = lpi2c_imx_remove,
        .driver = {
                .name = DRIVER_NAME,
                .of_match_table = lpi2c_imx_of_match,
+               .pm = &imx_lpi2c_pm,
        },
 };
 
index e34d82e79b988a781010cad1e0f283617dfb8471..c21ca7bf2efe4f02d420ef53c679b321eceb8fe6 100644 (file)
@@ -58,7 +58,7 @@
 #define SMBSLVDAT      (0xC + piix4_smba)
 
 /* count for request_region */
-#define SMBIOSIZE      8
+#define SMBIOSIZE      9
 
 /* PCI Address Constants */
 #define SMBBA          0x090
@@ -592,6 +592,8 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
        u8 port;
        int retval;
 
+       mutex_lock(&piix4_mutex_sb800);
+
        /* Request the SMBUS semaphore, avoid conflicts with the IMC */
        smbslvcnt  = inb_p(SMBSLVCNT);
        do {
@@ -605,10 +607,10 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
                usleep_range(1000, 2000);
        } while (--retries);
        /* SMBus is still owned by the IMC, we give up */
-       if (!retries)
+       if (!retries) {
+               mutex_unlock(&piix4_mutex_sb800);
                return -EBUSY;
-
-       mutex_lock(&piix4_mutex_sb800);
+       }
 
        outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
        smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
@@ -623,11 +625,11 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
 
        outb_p(smba_en_lo, SB800_PIIX4_SMB_IDX + 1);
 
-       mutex_unlock(&piix4_mutex_sb800);
-
        /* Release the semaphore */
        outb_p(smbslvcnt | 0x20, SMBSLVCNT);
 
+       mutex_unlock(&piix4_mutex_sb800);
+
        return retval;
 }
 
index 2bbf0c521bebb5c44840a8ce5296024910d8d73d..7d61b566e148dddd21ce2986cba233567802f93c 100644 (file)
@@ -775,7 +775,7 @@ static int palmas_adc_wakeup_reset(struct palmas_gpadc *adc)
 
 static int palmas_gpadc_suspend(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct palmas_gpadc *adc = iio_priv(indio_dev);
        int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
        int ret;
@@ -798,7 +798,7 @@ static int palmas_gpadc_suspend(struct device *dev)
 
 static int palmas_gpadc_resume(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct palmas_gpadc *adc = iio_priv(indio_dev);
        int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
        int ret;
index 9a081465c42f4225d26747a734c5daceea68c842..6bb23a49e81eb8cf304a2f81f50c5e36a91e2d48 100644 (file)
@@ -422,7 +422,7 @@ MODULE_DEVICE_TABLE(of, afe4403_of_match);
 
 static int __maybe_unused afe4403_suspend(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
        struct afe4403_data *afe = iio_priv(indio_dev);
        int ret;
 
@@ -443,7 +443,7 @@ static int __maybe_unused afe4403_suspend(struct device *dev)
 
 static int __maybe_unused afe4403_resume(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
        struct afe4403_data *afe = iio_priv(indio_dev);
        int ret;
 
index 45266404f7e3b5bd7c7790a0b05ff298fc24d5a9..964f5231a831c437c277e4bcfe292720104043f7 100644 (file)
@@ -428,7 +428,7 @@ MODULE_DEVICE_TABLE(of, afe4404_of_match);
 
 static int __maybe_unused afe4404_suspend(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
        struct afe4404_data *afe = iio_priv(indio_dev);
        int ret;
 
@@ -449,7 +449,7 @@ static int __maybe_unused afe4404_suspend(struct device *dev)
 
 static int __maybe_unused afe4404_resume(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
        struct afe4404_data *afe = iio_priv(indio_dev);
        int ret;
 
index 90ab8a2d2846f8a8591ee6b1615dce2c984020ef..183c14329d6e350f6325b0e77a95dcb56de892dd 100644 (file)
@@ -238,7 +238,7 @@ static irqreturn_t max30100_interrupt_handler(int irq, void *private)
 
        mutex_lock(&data->lock);
 
-       while (cnt || (cnt = max30100_fifo_count(data) > 0)) {
+       while (cnt || (cnt = max30100_fifo_count(data)) > 0) {
                ret = max30100_read_measurement(data);
                if (ret)
                        break;
index 9c47bc98f3acdea4cb4b56b23a72e582c34a413b..2a22ad92033306d02eec60f288c00a65d5186c37 100644 (file)
@@ -71,7 +71,8 @@
  * a) select an implementation using busy loop polling on those systems
  * b) use the checksum to do some probabilistic decoding
  */
-#define DHT11_START_TRANSMISSION       18  /* ms */
+#define DHT11_START_TRANSMISSION_MIN   18000  /* us */
+#define DHT11_START_TRANSMISSION_MAX   20000  /* us */
 #define DHT11_MIN_TIMERES      34000  /* ns */
 #define DHT11_THRESHOLD                49000  /* ns */
 #define DHT11_AMBIG_LOW                23000  /* ns */
@@ -228,7 +229,8 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
                ret = gpio_direction_output(dht11->gpio, 0);
                if (ret)
                        goto err;
-               msleep(DHT11_START_TRANSMISSION);
+               usleep_range(DHT11_START_TRANSMISSION_MIN,
+                            DHT11_START_TRANSMISSION_MAX);
                ret = gpio_direction_input(dht11->gpio);
                if (ret)
                        goto err;
index e7dcfac877ca2eb7b86601a9115b4c2fc27f4c01..3e70a9c5d79d5a50ba3be228cb4174d1f66c98d7 100644 (file)
@@ -2811,7 +2811,8 @@ static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
        if (!src_addr || !src_addr->sa_family) {
                src_addr = (struct sockaddr *) &id->route.addr.src_addr;
                src_addr->sa_family = dst_addr->sa_family;
-               if (dst_addr->sa_family == AF_INET6) {
+               if (IS_ENABLED(CONFIG_IPV6) &&
+                   dst_addr->sa_family == AF_INET6) {
                        struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
                        struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
                        src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
index 1e62a5f0cb28203e0732b9915840fd9f02d45701..4609b921f899c9d7481b86825f18fe076a6f732c 100644 (file)
@@ -134,6 +134,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
                 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
 
        if (access & IB_ACCESS_ON_DEMAND) {
+               put_pid(umem->pid);
                ret = ib_umem_odp_get(context, umem);
                if (ret) {
                        kfree(umem);
@@ -149,6 +150,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 
        page_list = (struct page **) __get_free_page(GFP_KERNEL);
        if (!page_list) {
+               put_pid(umem->pid);
                kfree(umem);
                return ERR_PTR(-ENOMEM);
        }
index b9efadfffb4f64b39e991230b25e676c9a23c777..e66e75921797446019fc67974a2baf4e1d4980a5 100644 (file)
 
 #define put_ep(ep) { \
        PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__,  \
-            ep, atomic_read(&((ep)->kref.refcount))); \
-       WARN_ON(atomic_read(&((ep)->kref.refcount)) < 1); \
+            ep, kref_read(&((ep)->kref))); \
+       WARN_ON(kref_read(&((ep)->kref)) < 1); \
        kref_put(&((ep)->kref), __free_ep); \
 }
 
 #define get_ep(ep) { \
        PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \
-            ep, atomic_read(&((ep)->kref.refcount))); \
+            ep, kref_read(&((ep)->kref))); \
        kref_get(&((ep)->kref));  \
 }
 
index 9d5fe1853da46e54e9d635c5d101b688f4a8119a..6262dc035f3cea4c9613d96f67ec13e76a18643e 100644 (file)
@@ -1135,16 +1135,7 @@ static int iwch_query_port(struct ib_device *ibdev,
 
        memset(props, 0, sizeof(struct ib_port_attr));
        props->max_mtu = IB_MTU_4096;
-       if (netdev->mtu >= 4096)
-               props->active_mtu = IB_MTU_4096;
-       else if (netdev->mtu >= 2048)
-               props->active_mtu = IB_MTU_2048;
-       else if (netdev->mtu >= 1024)
-               props->active_mtu = IB_MTU_1024;
-       else if (netdev->mtu >= 512)
-               props->active_mtu = IB_MTU_512;
-       else
-               props->active_mtu = IB_MTU_256;
+       props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
 
        if (!netif_carrier_ok(netdev))
                props->state = IB_PORT_DOWN;
index d939980a708fdcf1daa6a7a53686cb6509ef2bb5..a9194db7f9b88ead8be50b18051baf653d7aa505 100644 (file)
@@ -961,7 +961,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
        case IWCH_QP_STATE_RTS:
                switch (attrs->next_state) {
                case IWCH_QP_STATE_CLOSING:
-                       BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
+                       BUG_ON(kref_read(&qhp->ep->com.kref) < 2);
                        qhp->attr.state = IWCH_QP_STATE_CLOSING;
                        if (!internal) {
                                abort=0;
index f1510cc76d2dbe7027e81f5495f53bd7b6588536..9398143d7c5e93a01d5b35c67ae0fdd4e0126483 100644 (file)
@@ -1804,20 +1804,21 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
        skb_trim(skb, dlen);
        mutex_lock(&ep->com.mutex);
 
-       /* update RX credits */
-       update_rx_credits(ep, dlen);
-
        switch (ep->com.state) {
        case MPA_REQ_SENT:
+               update_rx_credits(ep, dlen);
                ep->rcv_seq += dlen;
                disconnect = process_mpa_reply(ep, skb);
                break;
        case MPA_REQ_WAIT:
+               update_rx_credits(ep, dlen);
                ep->rcv_seq += dlen;
                disconnect = process_mpa_request(ep, skb);
                break;
        case FPDU_MODE: {
                struct c4iw_qp_attributes attrs;
+
+               update_rx_credits(ep, dlen);
                BUG_ON(!ep->com.qp);
                if (status)
                        pr_err("%s Unexpected streaming data." \
index 19c6477af19f1416d17c15363e239307542b438d..bec82a600d77c7990432c756ecb8124df6a52852 100644 (file)
@@ -504,6 +504,15 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
                goto skip_cqe;
        }
 
+       /*
+        * Special cqe for drain WR completions...
+        */
+       if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
+               *cookie = CQE_DRAIN_COOKIE(hw_cqe);
+               *cqe = *hw_cqe;
+               goto skip_cqe;
+       }
+
        /*
         * Gotta tweak READ completions:
         *      1) the cqe doesn't contain the sq_wptr from the wr.
@@ -753,6 +762,9 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
                                c4iw_invalidate_mr(qhp->rhp,
                                                   CQE_WRID_FR_STAG(&cqe));
                        break;
+               case C4IW_DRAIN_OPCODE:
+                       wc->opcode = IB_WC_SEND;
+                       break;
                default:
                        printk(KERN_ERR MOD "Unexpected opcode %d "
                               "in the CQE received for QPID=0x%0x\n",
@@ -817,15 +829,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
                }
        }
 out:
-       if (wq) {
-               if (unlikely(qhp->attr.state != C4IW_QP_STATE_RTS)) {
-                       if (t4_sq_empty(wq))
-                               complete(&qhp->sq_drained);
-                       if (t4_rq_empty(wq))
-                               complete(&qhp->rq_drained);
-               }
+       if (wq)
                spin_unlock(&qhp->lock);
-       }
        return ret;
 }
 
index 516b0ae6dc3f6d061cc3c7d8ba412196785a599c..40c0e7b9fc6e4e671eb4fb03682f9130594c90cb 100644 (file)
@@ -846,9 +846,17 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
                }
        }
 
+       rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
+       if (!rdev->free_workq) {
+               err = -ENOMEM;
+               goto err_free_status_page;
+       }
+
        rdev->status_page->db_off = 0;
 
        return 0;
+err_free_status_page:
+       free_page((unsigned long)rdev->status_page);
 destroy_ocqp_pool:
        c4iw_ocqp_pool_destroy(rdev);
 destroy_rqtpool:
@@ -862,6 +870,7 @@ destroy_resource:
 
 static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 {
+       destroy_workqueue(rdev->free_workq);
        kfree(rdev->wr_log);
        free_page((unsigned long)rdev->status_page);
        c4iw_pblpool_destroy(rdev);
index 4788e1a46fdee23cce2956cc17ba8d09b0f3eb56..d19662f635b1cc36d578653c1b00de78f35dc081 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/kref.h>
 #include <linux/timer.h>
 #include <linux/io.h>
+#include <linux/workqueue.h>
 
 #include <asm/byteorder.h>
 
@@ -107,6 +108,7 @@ struct c4iw_dev_ucontext {
        struct list_head qpids;
        struct list_head cqids;
        struct mutex lock;
+       struct kref kref;
 };
 
 enum c4iw_rdev_flags {
@@ -183,6 +185,7 @@ struct c4iw_rdev {
        atomic_t wr_log_idx;
        struct wr_log_entry *wr_log;
        int wr_log_size;
+       struct workqueue_struct *free_workq;
 };
 
 static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
@@ -480,8 +483,8 @@ struct c4iw_qp {
        wait_queue_head_t wait;
        struct timer_list timer;
        int sq_sig_all;
-       struct completion rq_drained;
-       struct completion sq_drained;
+       struct work_struct free_work;
+       struct c4iw_ucontext *ucontext;
 };
 
 static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
@@ -495,6 +498,7 @@ struct c4iw_ucontext {
        u32 key;
        spinlock_t mmap_lock;
        struct list_head mmaps;
+       struct kref kref;
 };
 
 static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
@@ -502,6 +506,18 @@ static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
        return container_of(c, struct c4iw_ucontext, ibucontext);
 }
 
+void _c4iw_free_ucontext(struct kref *kref);
+
+static inline void c4iw_put_ucontext(struct c4iw_ucontext *ucontext)
+{
+       kref_put(&ucontext->kref, _c4iw_free_ucontext);
+}
+
+static inline void c4iw_get_ucontext(struct c4iw_ucontext *ucontext)
+{
+       kref_get(&ucontext->kref);
+}
+
 struct c4iw_mm_entry {
        struct list_head entry;
        u64 addr;
@@ -615,6 +631,8 @@ static inline int to_ib_qp_state(int c4iw_qp_state)
        return IB_QPS_ERR;
 }
 
+#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN
+
 static inline u32 c4iw_ib_to_tpt_access(int a)
 {
        return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
@@ -654,14 +672,14 @@ enum c4iw_mmid_state {
 
 #define c4iw_put_ep(ep) { \
        PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__,  \
-            ep, atomic_read(&((ep)->kref.refcount))); \
-       WARN_ON(atomic_read(&((ep)->kref.refcount)) < 1); \
+            ep, kref_read(&((ep)->kref))); \
+       WARN_ON(kref_read(&((ep)->kref)) < 1); \
        kref_put(&((ep)->kref), _c4iw_free_ep); \
 }
 
 #define c4iw_get_ep(ep) { \
        PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \
-            ep, atomic_read(&((ep)->kref.refcount))); \
+            ep, kref_read(&((ep)->kref))); \
        kref_get(&((ep)->kref));  \
 }
 void _c4iw_free_ep(struct kref *kref);
@@ -997,8 +1015,6 @@ extern int c4iw_wr_log;
 extern int db_fc_threshold;
 extern int db_coalescing_threshold;
 extern int use_dsgl;
-void c4iw_drain_rq(struct ib_qp *qp);
-void c4iw_drain_sq(struct ib_qp *qp);
 void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
 
 #endif
index 49b51b7e0fd786bf49dc2187e6c661f14087f3ae..3345e1c312f771cfaa8e31858624ca9892267467 100644 (file)
@@ -93,17 +93,28 @@ static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
        return -ENOSYS;
 }
 
-static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+void _c4iw_free_ucontext(struct kref *kref)
 {
-       struct c4iw_dev *rhp = to_c4iw_dev(context->device);
-       struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
+       struct c4iw_ucontext *ucontext;
+       struct c4iw_dev *rhp;
        struct c4iw_mm_entry *mm, *tmp;
 
-       PDBG("%s context %p\n", __func__, context);
+       ucontext = container_of(kref, struct c4iw_ucontext, kref);
+       rhp = to_c4iw_dev(ucontext->ibucontext.device);
+
+       PDBG("%s ucontext %p\n", __func__, ucontext);
        list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
                kfree(mm);
        c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
        kfree(ucontext);
+}
+
+static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+{
+       struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
+
+       PDBG("%s context %p\n", __func__, context);
+       c4iw_put_ucontext(ucontext);
        return 0;
 }
 
@@ -127,6 +138,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
        c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
        INIT_LIST_HEAD(&context->mmaps);
        spin_lock_init(&context->mmap_lock);
+       kref_init(&context->kref);
 
        if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
                if (!warned++)
@@ -361,16 +373,7 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
 
        memset(props, 0, sizeof(struct ib_port_attr));
        props->max_mtu = IB_MTU_4096;
-       if (netdev->mtu >= 4096)
-               props->active_mtu = IB_MTU_4096;
-       else if (netdev->mtu >= 2048)
-               props->active_mtu = IB_MTU_2048;
-       else if (netdev->mtu >= 1024)
-               props->active_mtu = IB_MTU_1024;
-       else if (netdev->mtu >= 512)
-               props->active_mtu = IB_MTU_512;
-       else
-               props->active_mtu = IB_MTU_256;
+       props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
 
        if (!netif_carrier_ok(netdev))
                props->state = IB_PORT_DOWN;
@@ -607,8 +610,6 @@ int c4iw_register_device(struct c4iw_dev *dev)
        dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
        dev->ibdev.get_port_immutable = c4iw_port_immutable;
        dev->ibdev.get_dev_fw_str = get_dev_fw_str;
-       dev->ibdev.drain_sq = c4iw_drain_sq;
-       dev->ibdev.drain_rq = c4iw_drain_rq;
 
        dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
        if (!dev->ibdev.iwcm)
index cda5542e13a206347447a49f18f9e8cb930e7c8c..d4fd2f5c8326b61bf29ae28ebc3559457cf1d31a 100644 (file)
@@ -715,13 +715,32 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
        return 0;
 }
 
-static void _free_qp(struct kref *kref)
+static void free_qp_work(struct work_struct *work)
+{
+       struct c4iw_ucontext *ucontext;
+       struct c4iw_qp *qhp;
+       struct c4iw_dev *rhp;
+
+       qhp = container_of(work, struct c4iw_qp, free_work);
+       ucontext = qhp->ucontext;
+       rhp = qhp->rhp;
+
+       PDBG("%s qhp %p ucontext %p\n", __func__, qhp, ucontext);
+       destroy_qp(&rhp->rdev, &qhp->wq,
+                  ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+
+       if (ucontext)
+               c4iw_put_ucontext(ucontext);
+       kfree(qhp);
+}
+
+static void queue_qp_free(struct kref *kref)
 {
        struct c4iw_qp *qhp;
 
        qhp = container_of(kref, struct c4iw_qp, kref);
        PDBG("%s qhp %p\n", __func__, qhp);
-       kfree(qhp);
+       queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
 }
 
 void c4iw_qp_add_ref(struct ib_qp *qp)
@@ -733,7 +752,7 @@ void c4iw_qp_add_ref(struct ib_qp *qp)
 void c4iw_qp_rem_ref(struct ib_qp *qp)
 {
        PDBG("%s ib_qp %p\n", __func__, qp);
-       kref_put(&to_c4iw_qp(qp)->kref, _free_qp);
+       kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
 }
 
 static void add_to_fc_list(struct list_head *head, struct list_head *entry)
@@ -776,6 +795,64 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
        return 0;
 }
 
+static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
+{
+       struct t4_cqe cqe = {};
+       struct c4iw_cq *schp;
+       unsigned long flag;
+       struct t4_cq *cq;
+
+       schp = to_c4iw_cq(qhp->ibqp.send_cq);
+       cq = &schp->cq;
+
+       cqe.u.drain_cookie = wr->wr_id;
+       cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+                                CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+                                CQE_TYPE_V(1) |
+                                CQE_SWCQE_V(1) |
+                                CQE_QPID_V(qhp->wq.sq.qid));
+
+       spin_lock_irqsave(&schp->lock, flag);
+       cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
+       cq->sw_queue[cq->sw_pidx] = cqe;
+       t4_swcq_produce(cq);
+       spin_unlock_irqrestore(&schp->lock, flag);
+
+       spin_lock_irqsave(&schp->comp_handler_lock, flag);
+       (*schp->ibcq.comp_handler)(&schp->ibcq,
+                                  schp->ibcq.cq_context);
+       spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+}
+
+static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
+{
+       struct t4_cqe cqe = {};
+       struct c4iw_cq *rchp;
+       unsigned long flag;
+       struct t4_cq *cq;
+
+       rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
+       cq = &rchp->cq;
+
+       cqe.u.drain_cookie = wr->wr_id;
+       cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+                                CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+                                CQE_TYPE_V(0) |
+                                CQE_SWCQE_V(1) |
+                                CQE_QPID_V(qhp->wq.sq.qid));
+
+       spin_lock_irqsave(&rchp->lock, flag);
+       cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
+       cq->sw_queue[cq->sw_pidx] = cqe;
+       t4_swcq_produce(cq);
+       spin_unlock_irqrestore(&rchp->lock, flag);
+
+       spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+       (*rchp->ibcq.comp_handler)(&rchp->ibcq,
+                                  rchp->ibcq.cq_context);
+       spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+}
+
 int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                   struct ib_send_wr **bad_wr)
 {
@@ -794,8 +871,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
        spin_lock_irqsave(&qhp->lock, flag);
        if (t4_wq_in_error(&qhp->wq)) {
                spin_unlock_irqrestore(&qhp->lock, flag);
-               *bad_wr = wr;
-               return -EINVAL;
+               complete_sq_drain_wr(qhp, wr);
+               return err;
        }
        num_wrs = t4_sq_avail(&qhp->wq);
        if (num_wrs == 0) {
@@ -937,8 +1014,8 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
        spin_lock_irqsave(&qhp->lock, flag);
        if (t4_wq_in_error(&qhp->wq)) {
                spin_unlock_irqrestore(&qhp->lock, flag);
-               *bad_wr = wr;
-               return -EINVAL;
+               complete_rq_drain_wr(qhp, wr);
+               return err;
        }
        num_wrs = t4_rq_avail(&qhp->wq);
        if (num_wrs == 0) {
@@ -1503,7 +1580,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
        case C4IW_QP_STATE_RTS:
                switch (attrs->next_state) {
                case C4IW_QP_STATE_CLOSING:
-                       BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
+                       BUG_ON(kref_read(&qhp->ep->com.kref) < 2);
                        t4_set_wq_in_error(&qhp->wq);
                        set_state(qhp, C4IW_QP_STATE_CLOSING);
                        ep = qhp->ep;
@@ -1550,7 +1627,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                }
                break;
        case C4IW_QP_STATE_CLOSING:
-               if (!internal) {
+
+               /*
+                * Allow kernel users to move to ERROR for qp draining.
+                */
+               if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
+                                 C4IW_QP_STATE_ERROR)) {
                        ret = -EINVAL;
                        goto out;
                }
@@ -1643,7 +1725,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
        struct c4iw_dev *rhp;
        struct c4iw_qp *qhp;
        struct c4iw_qp_attributes attrs;
-       struct c4iw_ucontext *ucontext;
 
        qhp = to_c4iw_qp(ib_qp);
        rhp = qhp->rhp;
@@ -1663,11 +1744,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
        spin_unlock_irq(&rhp->lock);
        free_ird(rhp, qhp->attr.max_ird);
 
-       ucontext = ib_qp->uobject ?
-                  to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
-       destroy_qp(&rhp->rdev, &qhp->wq,
-                  ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
-
        c4iw_qp_rem_ref(ib_qp);
 
        PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
@@ -1763,11 +1839,10 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
        qhp->attr.max_ird = 0;
        qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
        spin_lock_init(&qhp->lock);
-       init_completion(&qhp->sq_drained);
-       init_completion(&qhp->rq_drained);
        mutex_init(&qhp->mutex);
        init_waitqueue_head(&qhp->wait);
        kref_init(&qhp->kref);
+       INIT_WORK(&qhp->free_work, free_qp_work);
 
        ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
        if (ret)
@@ -1854,6 +1929,9 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
                        ma_sync_key_mm->len = PAGE_SIZE;
                        insert_mmap(ucontext, ma_sync_key_mm);
                }
+
+               c4iw_get_ucontext(ucontext);
+               qhp->ucontext = ucontext;
        }
        qhp->ibqp.qp_num = qhp->wq.sq.qid;
        init_timer(&(qhp->timer));
@@ -1958,40 +2036,3 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
        return 0;
 }
-
-static void move_qp_to_err(struct c4iw_qp *qp)
-{
-       struct c4iw_qp_attributes attrs = { .next_state = C4IW_QP_STATE_ERROR };
-
-       (void)c4iw_modify_qp(qp->rhp, qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
-}
-
-void c4iw_drain_sq(struct ib_qp *ibqp)
-{
-       struct c4iw_qp *qp = to_c4iw_qp(ibqp);
-       unsigned long flag;
-       bool need_to_wait;
-
-       move_qp_to_err(qp);
-       spin_lock_irqsave(&qp->lock, flag);
-       need_to_wait = !t4_sq_empty(&qp->wq);
-       spin_unlock_irqrestore(&qp->lock, flag);
-
-       if (need_to_wait)
-               wait_for_completion(&qp->sq_drained);
-}
-
-void c4iw_drain_rq(struct ib_qp *ibqp)
-{
-       struct c4iw_qp *qp = to_c4iw_qp(ibqp);
-       unsigned long flag;
-       bool need_to_wait;
-
-       move_qp_to_err(qp);
-       spin_lock_irqsave(&qp->lock, flag);
-       need_to_wait = !t4_rq_empty(&qp->wq);
-       spin_unlock_irqrestore(&qp->lock, flag);
-
-       if (need_to_wait)
-               wait_for_completion(&qp->rq_drained);
-}
index 862381aa83c824bb8712e408df39e49f47f11975..640d22148a3eeb86fdc2d5c795dcf02271bf57fb 100644 (file)
@@ -179,6 +179,7 @@ struct t4_cqe {
                        __be32 wrid_hi;
                        __be32 wrid_low;
                } gen;
+               u64 drain_cookie;
        } u;
        __be64 reserved;
        __be64 bits_type_ts;
@@ -238,6 +239,7 @@ struct t4_cqe {
 /* generic accessor macros */
 #define CQE_WRID_HI(x)         (be32_to_cpu((x)->u.gen.wrid_hi))
 #define CQE_WRID_LOW(x)                (be32_to_cpu((x)->u.gen.wrid_low))
+#define CQE_DRAIN_COOKIE(x)    ((x)->u.drain_cookie)
 
 /* macros for flit 3 of the cqe */
 #define CQE_GENBIT_S   63
index 29e97df9e1a7f87c784ebf33f4ebccfae217f433..4c000d60d5c6f865ae17aa28654497e3dbbb913c 100644 (file)
@@ -100,16 +100,7 @@ static int i40iw_query_port(struct ib_device *ibdev,
        memset(props, 0, sizeof(*props));
 
        props->max_mtu = IB_MTU_4096;
-       if (netdev->mtu >= 4096)
-               props->active_mtu = IB_MTU_4096;
-       else if (netdev->mtu >= 2048)
-               props->active_mtu = IB_MTU_2048;
-       else if (netdev->mtu >= 1024)
-               props->active_mtu = IB_MTU_1024;
-       else if (netdev->mtu >= 512)
-               props->active_mtu = IB_MTU_512;
-       else
-               props->active_mtu = IB_MTU_256;
+       props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
 
        props->lid = 1;
        if (netif_carrier_ok(iwdev->netdev))
index aff9fb14768be9006e05145b2bdd7b6f7dbee8ff..5a31f3c6a4211d507cc4634c49df53021bba505b 100644 (file)
@@ -478,17 +478,7 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr
        memset(props, 0, sizeof(*props));
 
        props->max_mtu = IB_MTU_4096;
-
-       if (netdev->mtu  >= 4096)
-               props->active_mtu = IB_MTU_4096;
-       else if (netdev->mtu  >= 2048)
-               props->active_mtu = IB_MTU_2048;
-       else if (netdev->mtu  >= 1024)
-               props->active_mtu = IB_MTU_1024;
-       else if (netdev->mtu  >= 512)
-               props->active_mtu = IB_MTU_512;
-       else
-               props->active_mtu = IB_MTU_256;
+       props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
 
        props->lid = 1;
        props->lmc = 0;
index 7b74d09a8217ca0f30de8d5065bb4b829100c56f..3ac8aa5ef37de2c5242125077eef78035d565901 100644 (file)
@@ -576,8 +576,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
        return 0;
 }
 
-void qedr_unaffiliated_event(void *context,
-                            u8 event_code)
+void qedr_unaffiliated_event(void *context, u8 event_code)
 {
        pr_err("unaffiliated event not implemented yet\n");
 }
@@ -792,6 +791,9 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
                if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
                        goto sysfs_err;
 
+       if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+               qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+
        DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
        return dev;
 
@@ -824,11 +826,10 @@ static void qedr_remove(struct qedr_dev *dev)
        ib_dealloc_device(&dev->ibdev);
 }
 
-static int qedr_close(struct qedr_dev *dev)
+static void qedr_close(struct qedr_dev *dev)
 {
-       qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
-
-       return 0;
+       if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+               qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
 }
 
 static void qedr_shutdown(struct qedr_dev *dev)
@@ -837,6 +838,12 @@ static void qedr_shutdown(struct qedr_dev *dev)
        qedr_remove(dev);
 }
 
+static void qedr_open(struct qedr_dev *dev)
+{
+       if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+               qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+}
+
 static void qedr_mac_address_change(struct qedr_dev *dev)
 {
        union ib_gid *sgid = &dev->sgid_tbl[0];
@@ -863,7 +870,7 @@ static void qedr_mac_address_change(struct qedr_dev *dev)
 
        ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
 
-       qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE);
+       qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);
 
        if (rc)
                DP_ERR(dev, "Error updating mac filter\n");
@@ -877,7 +884,7 @@ static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
 {
        switch (event) {
        case QEDE_UP:
-               qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
+               qedr_open(dev);
                break;
        case QEDE_DOWN:
                qedr_close(dev);
index 620badd7d4fbd7f6ceaa61aff95ef1e41161f0be..bb32e4792ec9f022d201c0585bcce7a7cbae179c 100644 (file)
@@ -113,6 +113,8 @@ struct qedr_device_attr {
        struct qed_rdma_events events;
 };
 
+#define QEDR_ENET_STATE_BIT    (0)
+
 struct qedr_dev {
        struct ib_device        ibdev;
        struct qed_dev          *cdev;
@@ -153,6 +155,8 @@ struct qedr_dev {
        struct qedr_cq          *gsi_sqcq;
        struct qedr_cq          *gsi_rqcq;
        struct qedr_qp          *gsi_qp;
+
+       unsigned long enet_state;
 };
 
 #define QEDR_MAX_SQ_PBL                        (0x8000)
@@ -188,6 +192,7 @@ struct qedr_dev {
 #define QEDR_ROCE_MAX_CNQ_SIZE         (0x4000)
 
 #define QEDR_MAX_PORT                  (1)
+#define QEDR_PORT                      (1)
 
 #define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
 
@@ -251,9 +256,6 @@ struct qedr_cq {
 
        u16 icid;
 
-       /* Lock to protect completion handler */
-       spinlock_t comp_handler_lock;
-
        /* Lock to protect multiplem CQ's */
        spinlock_t cq_lock;
        u8 arm_flags;
index 63890ebb72bdff1c525e87786f59b124b2ea8d3f..a9a8d8745d2e7f9ca20a5c849e042fdc6af261d6 100644 (file)
@@ -87,11 +87,8 @@ void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt)
        qedr_inc_sw_gsi_cons(&qp->sq);
        spin_unlock_irqrestore(&qp->q_lock, flags);
 
-       if (cq->ibcq.comp_handler) {
-               spin_lock_irqsave(&cq->comp_handler_lock, flags);
+       if (cq->ibcq.comp_handler)
                (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
-               spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
-       }
 }
 
 void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
@@ -113,11 +110,8 @@ void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
 
        spin_unlock_irqrestore(&qp->q_lock, flags);
 
-       if (cq->ibcq.comp_handler) {
-               spin_lock_irqsave(&cq->comp_handler_lock, flags);
+       if (cq->ibcq.comp_handler)
                (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
-               spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
-       }
 }
 
 static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
@@ -404,9 +398,9 @@ static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
        }
 
        if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
-               packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
-       else
                packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
+       else
+               packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
 
        packet->roce_mode = roce_mode;
        memcpy(packet->header.vaddr, ud_header_buffer, header_size);
index 57c8de2080773b161272774a69eaebf02cc411ed..c7d6c9a783bd615627e720eb8f043444c99c5f3c 100644 (file)
@@ -471,8 +471,6 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
                            struct ib_ucontext *context, struct ib_udata *udata)
 {
        struct qedr_dev *dev = get_qedr_dev(ibdev);
-       struct qedr_ucontext *uctx = NULL;
-       struct qedr_alloc_pd_uresp uresp;
        struct qedr_pd *pd;
        u16 pd_id;
        int rc;
@@ -489,21 +487,33 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
        if (!pd)
                return ERR_PTR(-ENOMEM);
 
-       dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+       rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+       if (rc)
+               goto err;
 
-       uresp.pd_id = pd_id;
        pd->pd_id = pd_id;
 
        if (udata && context) {
+               struct qedr_alloc_pd_uresp uresp;
+
+               uresp.pd_id = pd_id;
+
                rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
-               if (rc)
+               if (rc) {
                        DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
-               uctx = get_qedr_ucontext(context);
-               uctx->pd = pd;
-               pd->uctx = uctx;
+                       dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
+                       goto err;
+               }
+
+               pd->uctx = get_qedr_ucontext(context);
+               pd->uctx->pd = pd;
        }
 
        return &pd->ibpd;
+
+err:
+       kfree(pd);
+       return ERR_PTR(rc);
 }
 
 int qedr_dealloc_pd(struct ib_pd *ibpd)
@@ -1600,7 +1610,7 @@ err0:
        return ERR_PTR(-EFAULT);
 }
 
-enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
+static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
 {
        switch (qp_state) {
        case QED_ROCE_QP_STATE_RESET:
@@ -1621,7 +1631,8 @@ enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
        return IB_QPS_ERR;
 }
 
-enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state)
+static enum qed_roce_qp_state qedr_get_state_from_ibqp(
+                                       enum ib_qp_state qp_state)
 {
        switch (qp_state) {
        case IB_QPS_RESET:
@@ -1657,7 +1668,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
        int status = 0;
 
        if (new_state == qp->state)
-               return 1;
+               return 0;
 
        switch (qp->state) {
        case QED_ROCE_QP_STATE_RESET:
@@ -1733,6 +1744,14 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
                /* ERR->XXX */
                switch (new_state) {
                case QED_ROCE_QP_STATE_RESET:
+                       if ((qp->rq.prod != qp->rq.cons) ||
+                           (qp->sq.prod != qp->sq.cons)) {
+                               DP_NOTICE(dev,
+                                         "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
+                                         qp->rq.prod, qp->rq.cons, qp->sq.prod,
+                                         qp->sq.cons);
+                               status = -EINVAL;
+                       }
                        break;
                default:
                        status = -EINVAL;
@@ -1865,7 +1884,6 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                         qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
                DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
                         qp_params.remote_mac_addr);
-;
 
                qp_params.mtu = qp->mtu;
                qp_params.lb_indication = false;
@@ -2016,7 +2034,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
 
        qp_attr->qp_state = qedr_get_ibqp_state(params.state);
        qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
-       qp_attr->path_mtu = iboe_get_mtu(params.mtu);
+       qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
        qp_attr->path_mig_state = IB_MIG_MIGRATED;
        qp_attr->rq_psn = params.rq_psn;
        qp_attr->sq_psn = params.sq_psn;
@@ -2028,7 +2046,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
        qp_attr->cap.max_recv_wr = qp->rq.max_wr;
        qp_attr->cap.max_send_sge = qp->sq.max_sges;
        qp_attr->cap.max_recv_sge = qp->rq.max_sges;
-       qp_attr->cap.max_inline_data = qp->max_inline_data;
+       qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
        qp_init_attr->cap = qp_attr->cap;
 
        memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
@@ -2302,7 +2320,8 @@ int qedr_dereg_mr(struct ib_mr *ib_mr)
        return rc;
 }
 
-struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
+static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
+                                      int max_page_list_len)
 {
        struct qedr_pd *pd = get_qedr_pd(ibpd);
        struct qedr_dev *dev = get_qedr_dev(ibpd->device);
@@ -2704,7 +2723,7 @@ static int qedr_prepare_reg(struct qedr_qp *qp,
        return 0;
 }
 
-enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
+static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
 {
        switch (opcode) {
        case IB_WR_RDMA_WRITE:
@@ -2729,7 +2748,7 @@ enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
        }
 }
 
-inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
+static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
 {
        int wq_is_full, err_wr, pbl_is_full;
        struct qedr_dev *dev = qp->dev;
@@ -2766,7 +2785,7 @@ inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
        return true;
 }
 
-int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                     struct ib_send_wr **bad_wr)
 {
        struct qedr_dev *dev = get_qedr_dev(ibqp->device);
@@ -3234,9 +3253,10 @@ static int qedr_poll_cq_req(struct qedr_dev *dev,
                                  IB_WC_SUCCESS, 0);
                break;
        case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
-               DP_ERR(dev,
-                      "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
-                      cq->icid, qp->icid);
+               if (qp->state != QED_ROCE_QP_STATE_ERR)
+                       DP_ERR(dev,
+                              "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+                              cq->icid, qp->icid);
                cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
                                  IB_WC_WR_FLUSH_ERR, 1);
                break;
index 80ef3f8998c87136a6f941e151138f1440aa031e..04443242e25877ccd65a569500574e89a64aa433 100644 (file)
@@ -80,7 +80,7 @@ usnic_ib_show_config(struct device *device, struct device_attribute *attr,
        left = PAGE_SIZE;
 
        mutex_lock(&us_ibdev->usdev_lock);
-       if (atomic_read(&us_ibdev->vf_cnt.refcount) > 0) {
+       if (kref_read(&us_ibdev->vf_cnt) > 0) {
                char *busname;
 
                /*
@@ -99,7 +99,7 @@ usnic_ib_show_config(struct device *device, struct device_attribute *attr,
                        PCI_FUNC(us_ibdev->pdev->devfn),
                        netdev_name(us_ibdev->netdev),
                        us_ibdev->ufdev->mac,
-                       atomic_read(&us_ibdev->vf_cnt.refcount));
+                       kref_read(&us_ibdev->vf_cnt));
                UPDATE_PTR_LEFT(n, ptr, left);
 
                for (res_type = USNIC_VNIC_RES_TYPE_EOL;
@@ -147,7 +147,7 @@ usnic_ib_show_max_vf(struct device *device, struct device_attribute *attr,
        us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);
 
        return scnprintf(buf, PAGE_SIZE, "%u\n",
-                       atomic_read(&us_ibdev->vf_cnt.refcount));
+                       kref_read(&us_ibdev->vf_cnt));
 }
 
 static ssize_t
index 74819a7951e28f64f41655cf8967adb950db6c47..69df8e353123c893aa1f34277bc86dd961bffdea 100644 (file)
@@ -291,11 +291,11 @@ int usnic_ib_query_device(struct ib_device *ibdev,
        qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ],
                        us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]);
        props->max_qp = qp_per_vf *
-               atomic_read(&us_ibdev->vf_cnt.refcount);
+               kref_read(&us_ibdev->vf_cnt);
        props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
                IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
        props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] *
-               atomic_read(&us_ibdev->vf_cnt.refcount);
+               kref_read(&us_ibdev->vf_cnt);
        props->max_pd = USNIC_UIOM_MAX_PD_CNT;
        props->max_mr = USNIC_UIOM_MAX_MR_CNT;
        props->local_ca_ack_delay = 0;
index 231a1ce1f4bec845d6ecfc58f53cd0d4ea5762a1..bd8fbd3d2032d390cc41268e7d32544ff0c98308 100644 (file)
@@ -1029,7 +1029,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
        if (ret) {
                dev_err(&pdev->dev, "failed to allocate interrupts\n");
                ret = -ENOMEM;
-               goto err_netdevice;
+               goto err_free_cq_ring;
        }
 
        /* Allocate UAR table. */
@@ -1092,8 +1092,6 @@ err_free_uar_table:
 err_free_intrs:
        pvrdma_free_irq(dev);
        pvrdma_disable_msi_all(dev);
-err_netdevice:
-       unregister_netdevice_notifier(&dev->nb_netdev);
 err_free_cq_ring:
        pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
 err_free_async_ring:
index 54891370d18a5beef151816c882b40fb705a9ec5..c2aa52638dcb81ea4539b61c43d61f55edefb2b3 100644 (file)
@@ -306,7 +306,7 @@ struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
        union pvrdma_cmd_resp rsp;
        struct pvrdma_cmd_create_uc *cmd = &req.create_uc;
        struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp;
-       struct pvrdma_alloc_ucontext_resp uresp;
+       struct pvrdma_alloc_ucontext_resp uresp = {0};
        int ret;
        void *ptr;
 
index d0faca294006f4f53a40ebcf40839f9758b52c09..86a6585b847df90f07256dd4027eeab426be7f77 100644 (file)
@@ -59,9 +59,11 @@ int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
 
        case RXE_MEM_TYPE_MR:
        case RXE_MEM_TYPE_FMR:
-               return ((iova < mem->iova) ||
-                       ((iova + length) > (mem->iova + mem->length))) ?
-                       -EFAULT : 0;
+               if (iova < mem->iova ||
+                   length > mem->length ||
+                   iova > mem->iova + mem->length - length)
+                       return -EFAULT;
+               return 0;
 
        default:
                return -EFAULT;
index 342e78163613dfdc719b171e1396d01fd44432eb..4abdeb359fb4f52cbacde5c3e1f488296c6064c5 100644 (file)
@@ -555,7 +555,7 @@ struct rxe_dev *rxe_net_add(struct net_device *ndev)
        }
 
        spin_lock_bh(&dev_list_lock);
-       list_add_tail(&rxe_dev_list, &rxe->list);
+       list_add_tail(&rxe->list, &rxe_dev_list);
        spin_unlock_bh(&dev_list_lock);
        return rxe;
 }
index 486d576e55bc016dda1f8ddad6b8f00941f66727..44b2108253bd988ec1f5222da999575ed37d3bed 100644 (file)
@@ -813,8 +813,7 @@ void rxe_qp_destroy(struct rxe_qp *qp)
        del_timer_sync(&qp->rnr_nak_timer);
 
        rxe_cleanup_task(&qp->req.task);
-       if (qp_type(qp) == IB_QPT_RC)
-               rxe_cleanup_task(&qp->comp.task);
+       rxe_cleanup_task(&qp->comp.task);
 
        /* flush out any receive wr's or pending requests */
        __rxe_do_task(&qp->req.task);
index 3435efff879960cece0c7e122b5960a057f2d4a1..5bcf073289729bd881960e8f797af1aa82e3b5bc 100644 (file)
@@ -479,7 +479,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
                                goto err2;
                        }
 
-                       resid = mtu;
+                       qp->resp.resid = mtu;
                } else {
                        if (pktlen != resid) {
                                state = RESPST_ERR_LENGTH;
index 9104e6b8cac9f7b66322d7ebc573ab6a32fa525a..e71af717e71b0f5e6d82eb36b65d887c6005dc09 100644 (file)
@@ -651,13 +651,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
                                                   SHOST_DIX_GUARD_CRC);
                }
 
-               /*
-                * Limit the sg_tablesize and max_sectors based on the device
-                * max fastreg page list length.
-                */
-               shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize,
-                       ib_conn->device->ib_device->attrs.max_fast_reg_page_list_len);
-
                if (iscsi_host_add(shost,
                                   ib_conn->device->ib_device->dma_device)) {
                        mutex_unlock(&iser_conn->state_mutex);
@@ -679,6 +672,10 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
        max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9;
        shost->max_sectors = min(iser_max_sectors, max_fr_sectors);
 
+       iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
+                iser_conn, shost->sg_tablesize,
+                shost->max_sectors);
+
        if (cmds_max > max_cmds) {
                iser_info("cmds_max changed from %u to %u\n",
                          cmds_max, max_cmds);
index 0be6a7c5ddb5aea294be7c10a8247895adbc2dde..9d0b22ad58c15759c3b92472083da15af5c42bac 100644 (file)
@@ -496,7 +496,6 @@ struct ib_conn {
  * @rx_descs:         rx buffers array (cyclic buffer)
  * @num_rx_descs:     number of rx descriptors
  * @scsi_sg_tablesize: scsi host sg_tablesize
- * @scsi_max_sectors: scsi host max sectors
  */
 struct iser_conn {
        struct ib_conn               ib_conn;
@@ -519,7 +518,6 @@ struct iser_conn {
        struct iser_rx_desc          *rx_descs;
        u32                          num_rx_descs;
        unsigned short               scsi_sg_tablesize;
-       unsigned int                 scsi_max_sectors;
        bool                         snd_w_inv;
 };
 
index 8ae7a3beddb728ee22825e28382b37bc467bbe94..6a9d1cb548ee8f7f34cfe1a1f9ad7c54de133271 100644 (file)
@@ -707,18 +707,7 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
        sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE,
                                 device->ib_device->attrs.max_fast_reg_page_list_len);
 
-       if (sg_tablesize > sup_sg_tablesize) {
-               sg_tablesize = sup_sg_tablesize;
-               iser_conn->scsi_max_sectors = sg_tablesize * SIZE_4K / 512;
-       } else {
-               iser_conn->scsi_max_sectors = max_sectors;
-       }
-
-       iser_conn->scsi_sg_tablesize = sg_tablesize;
-
-       iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
-                iser_conn, iser_conn->scsi_sg_tablesize,
-                iser_conn->scsi_max_sectors);
+       iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
 }
 
 /**
index 8ddc071231931157ec459a6db4a0947d7b05f539..79bf48477ddb104097471a7a6040bcef2dfa0533 100644 (file)
@@ -371,6 +371,7 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
        struct srp_fr_desc *d;
        struct ib_mr *mr;
        int i, ret = -EINVAL;
+       enum ib_mr_type mr_type;
 
        if (pool_size <= 0)
                goto err;
@@ -384,9 +385,13 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
        spin_lock_init(&pool->lock);
        INIT_LIST_HEAD(&pool->free_list);
 
+       if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
+               mr_type = IB_MR_TYPE_SG_GAPS;
+       else
+               mr_type = IB_MR_TYPE_MEM_REG;
+
        for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
-               mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
-                                max_page_list_len);
+               mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
                if (IS_ERR(mr)) {
                        ret = PTR_ERR(mr);
                        if (ret == -ENOMEM)
@@ -3694,6 +3699,12 @@ static int __init srp_init_module(void)
                indirect_sg_entries = cmd_sg_entries;
        }
 
+       if (indirect_sg_entries > SG_MAX_SEGMENTS) {
+               pr_warn("Clamping indirect_sg_entries to %u\n",
+                       SG_MAX_SEGMENTS);
+               indirect_sg_entries = SG_MAX_SEGMENTS;
+       }
+
        srp_remove_wq = create_workqueue("srp_remove");
        if (!srp_remove_wq) {
                ret = -ENOMEM;
index 92595b98e7ede7b96dc4c9dd44f4629262f6e3a6..022be0e22eba97b10b95e653f48cee44f1d4ba36 100644 (file)
@@ -263,13 +263,21 @@ static int uinput_create_device(struct uinput_device *udev)
                return -EINVAL;
        }
 
-       if (test_bit(ABS_MT_SLOT, dev->absbit)) {
-               nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1;
-               error = input_mt_init_slots(dev, nslot, 0);
-               if (error)
+       if (test_bit(EV_ABS, dev->evbit)) {
+               input_alloc_absinfo(dev);
+               if (!dev->absinfo) {
+                       error = -EINVAL;
                        goto fail1;
-       } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
-               input_set_events_per_packet(dev, 60);
+               }
+
+               if (test_bit(ABS_MT_SLOT, dev->absbit)) {
+                       nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1;
+                       error = input_mt_init_slots(dev, nslot, 0);
+                       if (error)
+                               goto fail1;
+               } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
+                       input_set_events_per_packet(dev, 60);
+               }
        }
 
        if (test_bit(EV_FF, dev->evbit) && !udev->ff_effects_max) {
index fa598f7f4372c1ed0167b941a57a4b663a5361de..1e1d0ad406f2b7178bbb8cdcf1f320ade13ea0a1 100644 (file)
@@ -1231,6 +1231,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
        { "ELAN0000", 0 },
        { "ELAN0100", 0 },
        { "ELAN0600", 0 },
+       { "ELAN0605", 0 },
        { "ELAN1000", 0 },
        { }
 };
index 8993983e3fe4892b748ff5d77c1597880c63e2c9..bb7762bf2879b3a7383b2d326a859833adbbf360 100644 (file)
@@ -42,13 +42,19 @@ config RMI4_SMB
 config RMI4_F03
         bool "RMI4 Function 03 (PS2 Guest)"
        depends on RMI4_CORE
-       depends on SERIO=y || RMI4_CORE=SERIO
         help
           Say Y here if you want to add support for RMI4 function 03.
 
           Function 03 provides PS2 guest support for RMI4 devices. This
           includes support for TrackPoints on TouchPads.
 
+config RMI4_F03_SERIO
+       tristate
+       depends on RMI4_CORE
+       depends on RMI4_F03
+       default RMI4_CORE
+       select SERIO
+
 config RMI4_2D_SENSOR
        bool
        depends on RMI4_CORE
index 11447ab1055cd4beadf7eca752bdf9494d76cef1..bf5c36e229bacd63dd7e77d028aa65fb4555ce89 100644 (file)
@@ -901,7 +901,7 @@ void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
        data->enabled = true;
        if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) {
                retval = disable_irq_wake(irq);
-               if (!retval)
+               if (retval)
                        dev_warn(&rmi_dev->dev,
                                 "Failed to disable irq for wake: %d\n",
                                 retval);
@@ -936,7 +936,7 @@ void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake)
        disable_irq(irq);
        if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) {
                retval = enable_irq_wake(irq);
-               if (!retval)
+               if (retval)
                        dev_warn(&rmi_dev->dev,
                                 "Failed to enable irq for wake: %d\n",
                                 retval);
index 83cf11312fd971e0cacc16bd70eb474dd9c13b52..c9d1c91e1887094f2ef740d9eee3af8d16ee6f82 100644 (file)
@@ -682,7 +682,7 @@ static int wm97xx_probe(struct device *dev)
        }
        platform_set_drvdata(wm->battery_dev, wm);
        wm->battery_dev->dev.parent = dev;
-       wm->battery_dev->dev.platform_data = pdata->batt_pdata;
+       wm->battery_dev->dev.platform_data = pdata ? pdata->batt_pdata : NULL;
        ret = platform_device_add(wm->battery_dev);
        if (ret < 0)
                goto batt_reg_err;
index 8ee54d71c7eb3ad1e2a43f14068e75939e0dd077..37e204f3d9becccd31706ea4fa1241552868a975 100644 (file)
@@ -352,9 +352,6 @@ config MTK_IOMMU_V1
        select IOMMU_API
        select MEMORY
        select MTK_SMI
-       select COMMON_CLK_MT2701_MMSYS
-       select COMMON_CLK_MT2701_IMGSYS
-       select COMMON_CLK_MT2701_VDECSYS
        help
          Support for the M4U on certain Mediatek SoCs. M4U generation 1 HW is
          Multimedia Memory Managememt Unit. This option enables remapping of
index 3ef0f42984f2b1ec716509203988f9ebb70533cb..1b5b8c5361c506f2b835642eff2e3e57fbdf1370 100644 (file)
@@ -112,7 +112,7 @@ static struct timer_list queue_timer;
  * Domain for untranslated devices - only allocated
  * if iommu=pt passed on kernel cmd line.
  */
-static const struct iommu_ops amd_iommu_ops;
+const struct iommu_ops amd_iommu_ops;
 
 static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
 int amd_iommu_max_glx_val = -1;
@@ -445,6 +445,7 @@ static void init_iommu_group(struct device *dev)
 static int iommu_init_device(struct device *dev)
 {
        struct iommu_dev_data *dev_data;
+       struct amd_iommu *iommu;
        int devid;
 
        if (dev->archdata.iommu)
@@ -454,6 +455,8 @@ static int iommu_init_device(struct device *dev)
        if (devid < 0)
                return devid;
 
+       iommu = amd_iommu_rlookup_table[devid];
+
        dev_data = find_dev_data(devid);
        if (!dev_data)
                return -ENOMEM;
@@ -469,8 +472,7 @@ static int iommu_init_device(struct device *dev)
 
        dev->archdata.iommu = dev_data;
 
-       iommu_device_link(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
-                         dev);
+       iommu_device_link(&iommu->iommu, dev);
 
        return 0;
 }
@@ -495,13 +497,16 @@ static void iommu_ignore_device(struct device *dev)
 
 static void iommu_uninit_device(struct device *dev)
 {
-       int devid;
        struct iommu_dev_data *dev_data;
+       struct amd_iommu *iommu;
+       int devid;
 
        devid = get_device_id(dev);
        if (devid < 0)
                return;
 
+       iommu = amd_iommu_rlookup_table[devid];
+
        dev_data = search_dev_data(devid);
        if (!dev_data)
                return;
@@ -509,8 +514,7 @@ static void iommu_uninit_device(struct device *dev)
        if (dev_data->domain)
                detach_device(dev);
 
-       iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
-                           dev);
+       iommu_device_unlink(&iommu->iommu, dev);
 
        iommu_group_remove_device(dev);
 
@@ -3161,9 +3165,10 @@ static bool amd_iommu_capable(enum iommu_cap cap)
        return false;
 }
 
-static void amd_iommu_get_dm_regions(struct device *dev,
-                                    struct list_head *head)
+static void amd_iommu_get_resv_regions(struct device *dev,
+                                      struct list_head *head)
 {
+       struct iommu_resv_region *region;
        struct unity_map_entry *entry;
        int devid;
 
@@ -3172,41 +3177,56 @@ static void amd_iommu_get_dm_regions(struct device *dev,
                return;
 
        list_for_each_entry(entry, &amd_iommu_unity_map, list) {
-               struct iommu_dm_region *region;
+               size_t length;
+               int prot = 0;
 
                if (devid < entry->devid_start || devid > entry->devid_end)
                        continue;
 
-               region = kzalloc(sizeof(*region), GFP_KERNEL);
+               length = entry->address_end - entry->address_start;
+               if (entry->prot & IOMMU_PROT_IR)
+                       prot |= IOMMU_READ;
+               if (entry->prot & IOMMU_PROT_IW)
+                       prot |= IOMMU_WRITE;
+
+               region = iommu_alloc_resv_region(entry->address_start,
+                                                length, prot,
+                                                IOMMU_RESV_DIRECT);
                if (!region) {
                        pr_err("Out of memory allocating dm-regions for %s\n",
                                dev_name(dev));
                        return;
                }
-
-               region->start = entry->address_start;
-               region->length = entry->address_end - entry->address_start;
-               if (entry->prot & IOMMU_PROT_IR)
-                       region->prot |= IOMMU_READ;
-               if (entry->prot & IOMMU_PROT_IW)
-                       region->prot |= IOMMU_WRITE;
-
                list_add_tail(&region->list, head);
        }
+
+       region = iommu_alloc_resv_region(MSI_RANGE_START,
+                                        MSI_RANGE_END - MSI_RANGE_START + 1,
+                                        0, IOMMU_RESV_RESERVED);
+       if (!region)
+               return;
+       list_add_tail(&region->list, head);
+
+       region = iommu_alloc_resv_region(HT_RANGE_START,
+                                        HT_RANGE_END - HT_RANGE_START + 1,
+                                        0, IOMMU_RESV_RESERVED);
+       if (!region)
+               return;
+       list_add_tail(&region->list, head);
 }
 
-static void amd_iommu_put_dm_regions(struct device *dev,
+static void amd_iommu_put_resv_regions(struct device *dev,
                                     struct list_head *head)
 {
-       struct iommu_dm_region *entry, *next;
+       struct iommu_resv_region *entry, *next;
 
        list_for_each_entry_safe(entry, next, head, list)
                kfree(entry);
 }
 
-static void amd_iommu_apply_dm_region(struct device *dev,
+static void amd_iommu_apply_resv_region(struct device *dev,
                                      struct iommu_domain *domain,
-                                     struct iommu_dm_region *region)
+                                     struct iommu_resv_region *region)
 {
        struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain));
        unsigned long start, end;
@@ -3217,7 +3237,7 @@ static void amd_iommu_apply_dm_region(struct device *dev,
        WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL);
 }
 
-static const struct iommu_ops amd_iommu_ops = {
+const struct iommu_ops amd_iommu_ops = {
        .capable = amd_iommu_capable,
        .domain_alloc = amd_iommu_domain_alloc,
        .domain_free  = amd_iommu_domain_free,
@@ -3230,9 +3250,9 @@ static const struct iommu_ops amd_iommu_ops = {
        .add_device = amd_iommu_add_device,
        .remove_device = amd_iommu_remove_device,
        .device_group = amd_iommu_device_group,
-       .get_dm_regions = amd_iommu_get_dm_regions,
-       .put_dm_regions = amd_iommu_put_dm_regions,
-       .apply_dm_region = amd_iommu_apply_dm_region,
+       .get_resv_regions = amd_iommu_get_resv_regions,
+       .put_resv_regions = amd_iommu_put_resv_regions,
+       .apply_resv_region = amd_iommu_apply_resv_region,
        .pgsize_bitmap  = AMD_IOMMU_PGSIZES,
 };
 
index 6799cf9713f77f460f990e6bc0f38b31422c0745..04cdac7ab3e34bbcc3a4cba19fbb57d44129549b 100644 (file)
@@ -94,6 +94,8 @@
  * out of it.
  */
 
+extern const struct iommu_ops amd_iommu_ops;
+
 /*
  * structure describing one IOMMU in the ACPI table. Typically followed by one
  * or more ivhd_entrys.
@@ -1635,9 +1637,10 @@ static int iommu_init_pci(struct amd_iommu *iommu)
        amd_iommu_erratum_746_workaround(iommu);
        amd_iommu_ats_write_check_workaround(iommu);
 
-       iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu,
-                                              amd_iommu_groups, "ivhd%d",
-                                              iommu->index);
+       iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
+                              amd_iommu_groups, "ivhd%d", iommu->index);
+       iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
+       iommu_device_register(&iommu->iommu);
 
        return pci_enable_device(iommu->dev);
 }
@@ -2230,7 +2233,7 @@ static int __init early_amd_iommu_init(void)
         */
        ret = check_ivrs_checksum(ivrs_base);
        if (ret)
-               return ret;
+               goto out;
 
        amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
        DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
index 0d91785ebdc34accca7c4e9ed45da300ddbb68a7..af00f381a7b1a34e060039db569b274d04a532d1 100644 (file)
@@ -535,8 +535,8 @@ struct amd_iommu {
        /* if one, we need to send a completion wait command */
        bool need_sync;
 
-       /* IOMMU sysfs device */
-       struct device *iommu_dev;
+       /* Handle for IOMMU core code */
+       struct iommu_device iommu;
 
        /*
         * We can't rely on the BIOS to restore all values on reinit, so we
index 4d6ec444a9d63aa18232088bc9415c1f1eb6dfaa..5806a6acc94ecd7543c2435558a0907ec0934ff2 100644 (file)
 #define STRTAB_STE_1_SHCFG_INCOMING    1UL
 #define STRTAB_STE_1_SHCFG_SHIFT       44
 
-#define STRTAB_STE_1_PRIVCFG_UNPRIV    2UL
-#define STRTAB_STE_1_PRIVCFG_SHIFT     48
-
 #define STRTAB_STE_2_S2VMID_SHIFT      0
 #define STRTAB_STE_2_S2VMID_MASK       0xffffUL
 #define STRTAB_STE_2_VTCR_SHIFT                32
 /* High-level queue structures */
 #define ARM_SMMU_POLL_TIMEOUT_US       100
 
+#define MSI_IOVA_BASE                  0x8000000
+#define MSI_IOVA_LENGTH                        0x100000
+
 static bool disable_bypass;
 module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
 MODULE_PARM_DESC(disable_bypass,
@@ -616,6 +616,9 @@ struct arm_smmu_device {
        unsigned int                    sid_bits;
 
        struct arm_smmu_strtab_cfg      strtab_cfg;
+
+       /* IOMMU core code handle */
+       struct iommu_device             iommu;
 };
 
 /* SMMU private data for each master */
@@ -1042,13 +1045,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
                }
        }
 
-       /* Nuke the existing Config, as we're going to rewrite it */
-       val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT);
-
-       if (ste->valid)
-               val |= STRTAB_STE_0_V;
-       else
-               val &= ~STRTAB_STE_0_V;
+       /* Nuke the existing STE_0 value, as we're going to rewrite it */
+       val = ste->valid ? STRTAB_STE_0_V : 0;
 
        if (ste->bypass) {
                val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
@@ -1073,9 +1071,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
 #ifdef CONFIG_PCI_ATS
                         STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
 #endif
-                        STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT |
-                        STRTAB_STE_1_PRIVCFG_UNPRIV <<
-                        STRTAB_STE_1_PRIVCFG_SHIFT);
+                        STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);
 
                if (smmu->features & ARM_SMMU_FEAT_STALLS)
                        dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
@@ -1083,7 +1079,6 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
                val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
                        << STRTAB_STE_0_S1CTXPTR_SHIFT) |
                        STRTAB_STE_0_CFG_S1_TRANS;
-
        }
 
        if (ste->s2_cfg) {
@@ -1372,8 +1367,6 @@ static bool arm_smmu_capable(enum iommu_cap cap)
        switch (cap) {
        case IOMMU_CAP_CACHE_COHERENCY:
                return true;
-       case IOMMU_CAP_INTR_REMAP:
-               return true; /* MSIs are just memory writes */
        case IOMMU_CAP_NOEXEC:
                return true;
        default:
@@ -1795,8 +1788,10 @@ static int arm_smmu_add_device(struct device *dev)
        }
 
        group = iommu_group_get_for_dev(dev);
-       if (!IS_ERR(group))
+       if (!IS_ERR(group)) {
                iommu_group_put(group);
+               iommu_device_link(&smmu->iommu, dev);
+       }
 
        return PTR_ERR_OR_ZERO(group);
 }
@@ -1805,14 +1800,17 @@ static void arm_smmu_remove_device(struct device *dev)
 {
        struct iommu_fwspec *fwspec = dev->iommu_fwspec;
        struct arm_smmu_master_data *master;
+       struct arm_smmu_device *smmu;
 
        if (!fwspec || fwspec->ops != &arm_smmu_ops)
                return;
 
        master = fwspec->iommu_priv;
+       smmu = master->smmu;
        if (master && master->ste.valid)
                arm_smmu_detach_dev(dev);
        iommu_group_remove_device(dev);
+       iommu_device_unlink(&smmu->iommu, dev);
        kfree(master);
        iommu_fwspec_free(dev);
 }
@@ -1883,6 +1881,29 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
        return iommu_fwspec_add_ids(dev, args->args, 1);
 }
 
+static void arm_smmu_get_resv_regions(struct device *dev,
+                                     struct list_head *head)
+{
+       struct iommu_resv_region *region;
+       int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+
+       region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
+                                        prot, IOMMU_RESV_MSI);
+       if (!region)
+               return;
+
+       list_add_tail(&region->list, head);
+}
+
+static void arm_smmu_put_resv_regions(struct device *dev,
+                                     struct list_head *head)
+{
+       struct iommu_resv_region *entry, *next;
+
+       list_for_each_entry_safe(entry, next, head, list)
+               kfree(entry);
+}
+
 static struct iommu_ops arm_smmu_ops = {
        .capable                = arm_smmu_capable,
        .domain_alloc           = arm_smmu_domain_alloc,
@@ -1898,6 +1919,8 @@ static struct iommu_ops arm_smmu_ops = {
        .domain_get_attr        = arm_smmu_domain_get_attr,
        .domain_set_attr        = arm_smmu_domain_set_attr,
        .of_xlate               = arm_smmu_of_xlate,
+       .get_resv_regions       = arm_smmu_get_resv_regions,
+       .put_resv_regions       = arm_smmu_put_resv_regions,
        .pgsize_bitmap          = -1UL, /* Restricted during device attach */
 };
 
@@ -1983,17 +2006,9 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
        u32 size, l1size;
        struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
 
-       /*
-        * If we can resolve everything with a single L2 table, then we
-        * just need a single L1 descriptor. Otherwise, calculate the L1
-        * size, capped to the SIDSIZE.
-        */
-       if (smmu->sid_bits < STRTAB_SPLIT) {
-               size = 0;
-       } else {
-               size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
-               size = min(size, smmu->sid_bits - STRTAB_SPLIT);
-       }
+       /* Calculate the L1 size, capped to the SIDSIZE. */
+       size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
+       size = min(size, smmu->sid_bits - STRTAB_SPLIT);
        cfg->num_l1_ents = 1 << size;
 
        size += STRTAB_SPLIT;
@@ -2504,6 +2519,13 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
        smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK;
        smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK;
 
+       /*
+        * If the SMMU supports fewer bits than would fill a single L2 stream
+        * table, use a linear table instead.
+        */
+       if (smmu->sid_bits <= STRTAB_SPLIT)
+               smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;
+
        /* IDR5 */
        reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);
 
@@ -2613,6 +2635,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
 {
        int irq, ret;
        struct resource *res;
+       resource_size_t ioaddr;
        struct arm_smmu_device *smmu;
        struct device *dev = &pdev->dev;
        bool bypass;
@@ -2630,6 +2653,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
                dev_err(dev, "MMIO region too small (%pr)\n", res);
                return -EINVAL;
        }
+       ioaddr = res->start;
 
        smmu->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(smmu->base))
@@ -2682,7 +2706,15 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
                return ret;
 
        /* And we're up. Go go go! */
-       iommu_register_instance(dev->fwnode, &arm_smmu_ops);
+       ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
+                                    "smmu3.%pa", &ioaddr);
+       if (ret)
+               return ret;
+
+       iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
+       iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
+
+       ret = iommu_device_register(&smmu->iommu);
 
 #ifdef CONFIG_PCI
        if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
index a60cded8a6eddf445dcb1e2b0177999c207c8a74..abf6496843a617070289377ffad3fd1e119b0aa6 100644 (file)
@@ -24,6 +24,7 @@
  *     - v7/v8 long-descriptor format
  *     - Non-secure access to the SMMU
  *     - Context fault reporting
+ *     - Extended Stream ID (16 bit)
  */
 
 #define pr_fmt(fmt) "arm-smmu: " fmt
@@ -87,6 +88,7 @@
 #define sCR0_CLIENTPD                  (1 << 0)
 #define sCR0_GFRE                      (1 << 1)
 #define sCR0_GFIE                      (1 << 2)
+#define sCR0_EXIDENABLE                        (1 << 3)
 #define sCR0_GCFGFRE                   (1 << 4)
 #define sCR0_GCFGFIE                   (1 << 5)
 #define sCR0_USFCFG                    (1 << 10)
 #define ID0_NUMIRPT_MASK               0xff
 #define ID0_NUMSIDB_SHIFT              9
 #define ID0_NUMSIDB_MASK               0xf
+#define ID0_EXIDS                      (1 << 8)
 #define ID0_NUMSMRG_SHIFT              0
 #define ID0_NUMSMRG_MASK               0xff
 
 #define ARM_SMMU_GR0_S2CR(n)           (0xc00 + ((n) << 2))
 #define S2CR_CBNDX_SHIFT               0
 #define S2CR_CBNDX_MASK                        0xff
+#define S2CR_EXIDVALID                 (1 << 10)
 #define S2CR_TYPE_SHIFT                        16
 #define S2CR_TYPE_MASK                 0x3
 enum arm_smmu_s2cr_type {
@@ -260,6 +264,7 @@ enum arm_smmu_s2cr_privcfg {
 
 #define TTBCR2_SEP_SHIFT               15
 #define TTBCR2_SEP_UPSTREAM            (0x7 << TTBCR2_SEP_SHIFT)
+#define TTBCR2_AS                      (1 << 4)
 
 #define TTBRn_ASID_SHIFT               48
 
@@ -281,6 +286,9 @@ enum arm_smmu_s2cr_privcfg {
 
 #define FSYNR0_WNR                     (1 << 4)
 
+#define MSI_IOVA_BASE                  0x8000000
+#define MSI_IOVA_LENGTH                        0x100000
+
 static int force_stage;
 module_param(force_stage, int, S_IRUGO);
 MODULE_PARM_DESC(force_stage,
@@ -351,6 +359,7 @@ struct arm_smmu_device {
 #define ARM_SMMU_FEAT_FMT_AARCH64_64K  (1 << 9)
 #define ARM_SMMU_FEAT_FMT_AARCH32_L    (1 << 10)
 #define ARM_SMMU_FEAT_FMT_AARCH32_S    (1 << 11)
+#define ARM_SMMU_FEAT_EXIDS            (1 << 12)
        u32                             features;
 
 #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
@@ -380,6 +389,9 @@ struct arm_smmu_device {
        unsigned int                    *irqs;
 
        u32                             cavium_id_base; /* Specific to Cavium */
+
+       /* IOMMU core code handle */
+       struct iommu_device             iommu;
 };
 
 enum arm_smmu_context_fmt {
@@ -778,6 +790,8 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
                        reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
                        reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
                        reg2 |= TTBCR2_SEP_UPSTREAM;
+                       if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
+                               reg2 |= TTBCR2_AS;
                }
                if (smmu->version > ARM_SMMU_V1)
                        writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
@@ -1048,7 +1062,7 @@ static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
        struct arm_smmu_smr *smr = smmu->smrs + idx;
        u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
 
-       if (smr->valid)
+       if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
                reg |= SMR_VALID;
        writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
 }
@@ -1060,6 +1074,9 @@ static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
                  (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
                  (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
 
+       if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
+           smmu->smrs[idx].valid)
+               reg |= S2CR_EXIDVALID;
        writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
 }
 
@@ -1070,6 +1087,34 @@ static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
                arm_smmu_write_smr(smmu, idx);
 }
 
+/*
+ * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
+ * should be called after sCR0 is written.
+ */
+static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
+{
+       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+       u32 smr;
+
+       if (!smmu->smrs)
+               return;
+
+       /*
+        * SMR.ID bits may not be preserved if the corresponding MASK
+        * bits are set, so check each one separately. We can reject
+        * masters later if they try to claim IDs outside these masks.
+        */
+       smr = smmu->streamid_mask << SMR_ID_SHIFT;
+       writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
+       smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+       smmu->streamid_mask = smr >> SMR_ID_SHIFT;
+
+       smr = smmu->streamid_mask << SMR_MASK_SHIFT;
+       writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
+       smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+       smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
+}
+
 static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
 {
        struct arm_smmu_smr *smrs = smmu->smrs;
@@ -1214,7 +1259,7 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
                        continue;
 
                s2cr[idx].type = type;
-               s2cr[idx].privcfg = S2CR_PRIVCFG_UNPRIV;
+               s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
                s2cr[idx].cbndx = cbndx;
                arm_smmu_write_s2cr(smmu, idx);
        }
@@ -1371,8 +1416,6 @@ static bool arm_smmu_capable(enum iommu_cap cap)
                 * requests.
                 */
                return true;
-       case IOMMU_CAP_INTR_REMAP:
-               return true; /* MSIs are just memory writes */
        case IOMMU_CAP_NOEXEC:
                return true;
        default:
@@ -1444,6 +1487,8 @@ static int arm_smmu_add_device(struct device *dev)
        if (ret)
                goto out_free;
 
+       iommu_device_link(&smmu->iommu, dev);
+
        return 0;
 
 out_free:
@@ -1456,10 +1501,17 @@ out_free:
 static void arm_smmu_remove_device(struct device *dev)
 {
        struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+       struct arm_smmu_master_cfg *cfg;
+       struct arm_smmu_device *smmu;
+
 
        if (!fwspec || fwspec->ops != &arm_smmu_ops)
                return;
 
+       cfg  = fwspec->iommu_priv;
+       smmu = cfg->smmu;
+
+       iommu_device_unlink(&smmu->iommu, dev);
        arm_smmu_master_free_smes(fwspec);
        iommu_group_remove_device(dev);
        kfree(fwspec->iommu_priv);
@@ -1549,6 +1601,29 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
        return iommu_fwspec_add_ids(dev, &fwid, 1);
 }
 
+static void arm_smmu_get_resv_regions(struct device *dev,
+                                     struct list_head *head)
+{
+       struct iommu_resv_region *region;
+       int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+
+       region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
+                                        prot, IOMMU_RESV_MSI);
+       if (!region)
+               return;
+
+       list_add_tail(&region->list, head);
+}
+
+static void arm_smmu_put_resv_regions(struct device *dev,
+                                     struct list_head *head)
+{
+       struct iommu_resv_region *entry, *next;
+
+       list_for_each_entry_safe(entry, next, head, list)
+               kfree(entry);
+}
+
 static struct iommu_ops arm_smmu_ops = {
        .capable                = arm_smmu_capable,
        .domain_alloc           = arm_smmu_domain_alloc,
@@ -1564,6 +1639,8 @@ static struct iommu_ops arm_smmu_ops = {
        .domain_get_attr        = arm_smmu_domain_get_attr,
        .domain_set_attr        = arm_smmu_domain_set_attr,
        .of_xlate               = arm_smmu_of_xlate,
+       .get_resv_regions       = arm_smmu_get_resv_regions,
+       .put_resv_regions       = arm_smmu_put_resv_regions,
        .pgsize_bitmap          = -1UL, /* Restricted during device attach */
 };
 
@@ -1648,6 +1725,9 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
        if (smmu->features & ARM_SMMU_FEAT_VMID16)
                reg |= sCR0_VMID16EN;
 
+       if (smmu->features & ARM_SMMU_FEAT_EXIDS)
+               reg |= sCR0_EXIDENABLE;
+
        /* Push the button */
        __arm_smmu_tlb_sync(smmu);
        writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
@@ -1735,11 +1815,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                           "\t(IDR0.CTTW overridden by FW configuration)\n");
 
        /* Max. number of entries we have for stream matching/indexing */
-       size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
+       if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
+               smmu->features |= ARM_SMMU_FEAT_EXIDS;
+               size = 1 << 16;
+       } else {
+               size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
+       }
        smmu->streamid_mask = size - 1;
        if (id & ID0_SMS) {
-               u32 smr;
-
                smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
                size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
                if (size == 0) {
@@ -1748,21 +1831,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                        return -ENODEV;
                }
 
-               /*
-                * SMR.ID bits may not be preserved if the corresponding MASK
-                * bits are set, so check each one separately. We can reject
-                * masters later if they try to claim IDs outside these masks.
-                */
-               smr = smmu->streamid_mask << SMR_ID_SHIFT;
-               writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
-               smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
-               smmu->streamid_mask = smr >> SMR_ID_SHIFT;
-
-               smr = smmu->streamid_mask << SMR_MASK_SHIFT;
-               writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
-               smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
-               smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
-
                /* Zero-initialised to mark as invalid */
                smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
                                          GFP_KERNEL);
@@ -1770,8 +1838,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                        return -ENOMEM;
 
                dev_notice(smmu->dev,
-                          "\tstream matching with %lu register groups, mask 0x%x",
-                          size, smmu->smr_mask_mask);
+                          "\tstream matching with %lu register groups", size);
        }
        /* s2cr->type == 0 means translation, so initialise explicitly */
        smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
@@ -2011,6 +2078,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev,
 static int arm_smmu_device_probe(struct platform_device *pdev)
 {
        struct resource *res;
+       resource_size_t ioaddr;
        struct arm_smmu_device *smmu;
        struct device *dev = &pdev->dev;
        int num_irqs, i, err;
@@ -2031,6 +2099,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
                return err;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       ioaddr = res->start;
        smmu->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(smmu->base))
                return PTR_ERR(smmu->base);
@@ -2091,9 +2160,25 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
                }
        }
 
-       iommu_register_instance(dev->fwnode, &arm_smmu_ops);
+       err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
+                                    "smmu.%pa", &ioaddr);
+       if (err) {
+               dev_err(dev, "Failed to register iommu in sysfs\n");
+               return err;
+       }
+
+       iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
+       iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
+
+       err = iommu_device_register(&smmu->iommu);
+       if (err) {
+               dev_err(dev, "Failed to register iommu\n");
+               return err;
+       }
+
        platform_set_drvdata(pdev, smmu);
        arm_smmu_device_reset(smmu);
+       arm_smmu_test_smr_masks(smmu);
 
        /* Oh, for a proper bus abstraction */
        if (!iommu_present(&platform_bus_type))
index 2db0d641cf4505b565656d99dbb29309247cd964..48d36ce59efbfd6305f8e85e9ae85875f71215f8 100644 (file)
@@ -37,15 +37,50 @@ struct iommu_dma_msi_page {
        phys_addr_t             phys;
 };
 
+enum iommu_dma_cookie_type {
+       IOMMU_DMA_IOVA_COOKIE,
+       IOMMU_DMA_MSI_COOKIE,
+};
+
 struct iommu_dma_cookie {
-       struct iova_domain      iovad;
-       struct list_head        msi_page_list;
-       spinlock_t              msi_lock;
+       enum iommu_dma_cookie_type      type;
+       union {
+               /* Full allocator for IOMMU_DMA_IOVA_COOKIE */
+               struct iova_domain      iovad;
+               /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
+               dma_addr_t              msi_iova;
+       };
+       struct list_head                msi_page_list;
+       spinlock_t                      msi_lock;
 };
 
+static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
+{
+       if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
+               return cookie->iovad.granule;
+       return PAGE_SIZE;
+}
+
 static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
 {
-       return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
+       struct iommu_dma_cookie *cookie = domain->iova_cookie;
+
+       if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
+               return &cookie->iovad;
+       return NULL;
+}
+
+static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
+{
+       struct iommu_dma_cookie *cookie;
+
+       cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+       if (cookie) {
+               spin_lock_init(&cookie->msi_lock);
+               INIT_LIST_HEAD(&cookie->msi_page_list);
+               cookie->type = type;
+       }
+       return cookie;
 }
 
 int iommu_dma_init(void)
@@ -61,26 +96,54 @@ int iommu_dma_init(void)
  * callback when domain->type == IOMMU_DOMAIN_DMA.
  */
 int iommu_get_dma_cookie(struct iommu_domain *domain)
+{
+       if (domain->iova_cookie)
+               return -EEXIST;
+
+       domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
+       if (!domain->iova_cookie)
+               return -ENOMEM;
+
+       return 0;
+}
+EXPORT_SYMBOL(iommu_get_dma_cookie);
+
+/**
+ * iommu_get_msi_cookie - Acquire just MSI remapping resources
+ * @domain: IOMMU domain to prepare
+ * @base: Start address of IOVA region for MSI mappings
+ *
+ * Users who manage their own IOVA allocation and do not want DMA API support,
+ * but would still like to take advantage of automatic MSI remapping, can use
+ * this to initialise their own domain appropriately. Users should reserve a
+ * contiguous IOVA region, starting at @base, large enough to accommodate the
+ * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
+ * used by the devices attached to @domain.
+ */
+int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
 {
        struct iommu_dma_cookie *cookie;
 
+       if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+               return -EINVAL;
+
        if (domain->iova_cookie)
                return -EEXIST;
 
-       cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+       cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
        if (!cookie)
                return -ENOMEM;
 
-       spin_lock_init(&cookie->msi_lock);
-       INIT_LIST_HEAD(&cookie->msi_page_list);
+       cookie->msi_iova = base;
        domain->iova_cookie = cookie;
        return 0;
 }
-EXPORT_SYMBOL(iommu_get_dma_cookie);
+EXPORT_SYMBOL(iommu_get_msi_cookie);
 
 /**
  * iommu_put_dma_cookie - Release a domain's DMA mapping resources
- * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
+ * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
+ *          iommu_get_msi_cookie()
  *
  * IOMMU drivers should normally call this from their domain_free callback.
  */
@@ -92,7 +155,7 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
        if (!cookie)
                return;
 
-       if (cookie->iovad.granule)
+       if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
                put_iova_domain(&cookie->iovad);
 
        list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
@@ -137,11 +200,13 @@ static void iova_reserve_pci_windows(struct pci_dev *dev,
 int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
                u64 size, struct device *dev)
 {
-       struct iova_domain *iovad = cookie_iovad(domain);
+       struct iommu_dma_cookie *cookie = domain->iova_cookie;
+       struct iova_domain *iovad = &cookie->iovad;
        unsigned long order, base_pfn, end_pfn;
+       bool pci = dev && dev_is_pci(dev);
 
-       if (!iovad)
-               return -ENODEV;
+       if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
+               return -EINVAL;
 
        /* Use the smallest supported page size for IOVA granularity */
        order = __ffs(domain->pgsize_bitmap);
@@ -161,19 +226,31 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
                end_pfn = min_t(unsigned long, end_pfn,
                                domain->geometry.aperture_end >> order);
        }
+       /*
+        * PCI devices may have larger DMA masks, but still prefer allocating
+        * within a 32-bit mask to avoid DAC addressing. Such limitations don't
+        * apply to the typical platform device, so for those we may as well
+        * leave the cache limit at the top of their range to save an rb_last()
+        * traversal on every allocation.
+        */
+       if (pci)
+               end_pfn &= DMA_BIT_MASK(32) >> order;
 
-       /* All we can safely do with an existing domain is enlarge it */
+       /* start_pfn is always nonzero for an already-initialised domain */
        if (iovad->start_pfn) {
                if (1UL << order != iovad->granule ||
-                   base_pfn != iovad->start_pfn ||
-                   end_pfn < iovad->dma_32bit_pfn) {
+                   base_pfn != iovad->start_pfn) {
                        pr_warn("Incompatible range for DMA domain\n");
                        return -EFAULT;
                }
-               iovad->dma_32bit_pfn = end_pfn;
+               /*
+                * If we have devices with different DMA masks, move the free
+                * area cache limit down for the benefit of the smaller one.
+                */
+               iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn);
        } else {
                init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
-               if (dev && dev_is_pci(dev))
+               if (pci)
                        iova_reserve_pci_windows(to_pci_dev(dev), iovad);
        }
        return 0;
@@ -181,16 +258,22 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 EXPORT_SYMBOL(iommu_dma_init_domain);
 
 /**
- * dma_direction_to_prot - Translate DMA API directions to IOMMU API page flags
+ * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
+ *                    page flags.
  * @dir: Direction of DMA transfer
  * @coherent: Is the DMA master cache-coherent?
+ * @attrs: DMA attributes for the mapping
  *
  * Return: corresponding IOMMU API page protection flags
  */
-int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
+int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
+                    unsigned long attrs)
 {
        int prot = coherent ? IOMMU_CACHE : 0;
 
+       if (attrs & DMA_ATTR_PRIVILEGED)
+               prot |= IOMMU_PRIV;
+
        switch (dir) {
        case DMA_BIDIRECTIONAL:
                return prot | IOMMU_READ | IOMMU_WRITE;
@@ -204,19 +287,28 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
 }
 
 static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
-               dma_addr_t dma_limit)
+               dma_addr_t dma_limit, struct device *dev)
 {
        struct iova_domain *iovad = cookie_iovad(domain);
        unsigned long shift = iova_shift(iovad);
        unsigned long length = iova_align(iovad, size) >> shift;
+       struct iova *iova = NULL;
 
        if (domain->geometry.force_aperture)
                dma_limit = min(dma_limit, domain->geometry.aperture_end);
+
+       /* Try to get PCI devices a SAC address */
+       if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
+               iova = alloc_iova(iovad, length, DMA_BIT_MASK(32) >> shift,
+                                 true);
        /*
         * Enforce size-alignment to be safe - there could perhaps be an
         * attribute to control this per-device, or at least per-domain...
         */
-       return alloc_iova(iovad, length, dma_limit >> shift, true);
+       if (!iova)
+               iova = alloc_iova(iovad, length, dma_limit >> shift, true);
+
+       return iova;
 }
 
 /* The IOVA allocator knows what we mapped, so just unmap whatever that was */
@@ -369,7 +461,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
        if (!pages)
                return NULL;
 
-       iova = __alloc_iova(domain, size, dev->coherent_dma_mask);
+       iova = __alloc_iova(domain, size, dev->coherent_dma_mask, dev);
        if (!iova)
                goto out_free_pages;
 
@@ -440,7 +532,7 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
        struct iova_domain *iovad = cookie_iovad(domain);
        size_t iova_off = iova_offset(iovad, phys);
        size_t len = iova_align(iovad, size + iova_off);
-       struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev));
+       struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev), dev);
 
        if (!iova)
                return DMA_ERROR_CODE;
@@ -598,7 +690,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
                prev = s;
        }
 
-       iova = __alloc_iova(domain, iova_len, dma_get_mask(dev));
+       iova = __alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
        if (!iova)
                goto out_restore_sg;
 
@@ -633,7 +725,7 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
        return __iommu_dma_map(dev, phys, size,
-                       dma_direction_to_prot(dir, false) | IOMMU_MMIO);
+                       dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
 }
 
 void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
@@ -642,16 +734,6 @@ void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
 }
 
-int iommu_dma_supported(struct device *dev, u64 mask)
-{
-       /*
-        * 'Special' IOMMUs which don't have the same addressing capability
-        * as the CPU will have to wait until we have some way to query that
-        * before they'll be able to use this framework.
-        */
-       return 1;
-}
-
 int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
        return dma_addr == DMA_ERROR_CODE;
@@ -662,11 +744,12 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 {
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iommu_dma_msi_page *msi_page;
-       struct iova_domain *iovad = &cookie->iovad;
+       struct iova_domain *iovad = cookie_iovad(domain);
        struct iova *iova;
        int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+       size_t size = cookie_msi_granule(cookie);
 
-       msi_addr &= ~(phys_addr_t)iova_mask(iovad);
+       msi_addr &= ~(phys_addr_t)(size - 1);
        list_for_each_entry(msi_page, &cookie->msi_page_list, list)
                if (msi_page->phys == msi_addr)
                        return msi_page;
@@ -675,13 +758,18 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
        if (!msi_page)
                return NULL;
 
-       iova = __alloc_iova(domain, iovad->granule, dma_get_mask(dev));
-       if (!iova)
-               goto out_free_page;
-
        msi_page->phys = msi_addr;
-       msi_page->iova = iova_dma_addr(iovad, iova);
-       if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot))
+       if (iovad) {
+               iova = __alloc_iova(domain, size, dma_get_mask(dev), dev);
+               if (!iova)
+                       goto out_free_page;
+               msi_page->iova = iova_dma_addr(iovad, iova);
+       } else {
+               msi_page->iova = cookie->msi_iova;
+               cookie->msi_iova += size;
+       }
+
+       if (iommu_map(domain, msi_page->iova, msi_addr, size, prot))
                goto out_free_iova;
 
        INIT_LIST_HEAD(&msi_page->list);
@@ -689,7 +777,10 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
        return msi_page;
 
 out_free_iova:
-       __free_iova(iovad, iova);
+       if (iovad)
+               __free_iova(iovad, iova);
+       else
+               cookie->msi_iova -= size;
 out_free_page:
        kfree(msi_page);
        return NULL;
@@ -730,7 +821,7 @@ void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
                msg->data = ~0U;
        } else {
                msg->address_hi = upper_32_bits(msi_page->iova);
-               msg->address_lo &= iova_mask(&cookie->iovad);
+               msg->address_lo &= cookie_msi_granule(cookie) - 1;
                msg->address_lo += lower_32_bits(msi_page->iova);
        }
 }
index 8ccbd7023194ee592fa91dafb67565d1ad9928aa..d9c0decfc91ae2cef9ccf21c3a99afb455bbb9fa 100644 (file)
@@ -74,6 +74,8 @@ static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
 static int alloc_iommu(struct dmar_drhd_unit *drhd);
 static void free_iommu(struct intel_iommu *iommu);
 
+extern const struct iommu_ops intel_iommu_ops;
+
 static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
 {
        /*
@@ -1078,14 +1080,17 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
        raw_spin_lock_init(&iommu->register_lock);
 
        if (intel_iommu_enabled) {
-               iommu->iommu_dev = iommu_device_create(NULL, iommu,
-                                                      intel_iommu_groups,
-                                                      "%s", iommu->name);
+               err = iommu_device_sysfs_add(&iommu->iommu, NULL,
+                                            intel_iommu_groups,
+                                            "%s", iommu->name);
+               if (err)
+                       goto err_unmap;
 
-               if (IS_ERR(iommu->iommu_dev)) {
-                       err = PTR_ERR(iommu->iommu_dev);
+               iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
+
+               err = iommu_device_register(&iommu->iommu);
+               if (err)
                        goto err_unmap;
-               }
        }
 
        drhd->iommu = iommu;
@@ -1103,7 +1108,8 @@ error:
 
 static void free_iommu(struct intel_iommu *iommu)
 {
-       iommu_device_destroy(iommu->iommu_dev);
+       iommu_device_sysfs_remove(&iommu->iommu);
+       iommu_device_unregister(&iommu->iommu);
 
        if (iommu->irq) {
                if (iommu->pr_irq) {
index 57ba0d3091ea257a221de36f3143311db3989d63..a7e0821c9967e490258921238e6640723e79375d 100644 (file)
@@ -276,6 +276,8 @@ struct sysmmu_drvdata {
        struct list_head owner_node;    /* node for owner controllers list */
        phys_addr_t pgtable;            /* assigned page table structure */
        unsigned int version;           /* our version */
+
+       struct iommu_device iommu;      /* IOMMU core handle */
 };
 
 static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
@@ -381,13 +383,14 @@ static void show_fault_information(struct sysmmu_drvdata *data,
 {
        sysmmu_pte_t *ent;
 
-       dev_err(data->sysmmu, "%s FAULT occurred at %#x (page table base: %pa)\n",
-               finfo->name, fault_addr, &data->pgtable);
+       dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n",
+               dev_name(data->master), finfo->name, fault_addr);
+       dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
        ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
-       dev_err(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
+       dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
        if (lv1ent_page(ent)) {
                ent = page_entry(ent, fault_addr);
-               dev_err(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
+               dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
        }
 }
 
@@ -611,6 +614,18 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
        data->sysmmu = dev;
        spin_lock_init(&data->lock);
 
+       ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
+                                    dev_name(data->sysmmu));
+       if (ret)
+               return ret;
+
+       iommu_device_set_ops(&data->iommu, &exynos_iommu_ops);
+       iommu_device_set_fwnode(&data->iommu, &dev->of_node->fwnode);
+
+       ret = iommu_device_register(&data->iommu);
+       if (ret)
+               return ret;
+
        platform_set_drvdata(pdev, data);
 
        __sysmmu_get_version(data);
@@ -628,8 +643,6 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
 
        pm_runtime_enable(dev);
 
-       of_iommu_set_ops(dev->of_node, &exynos_iommu_ops);
-
        return 0;
 }
 
@@ -743,6 +756,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
                                DMA_TO_DEVICE);
        /* For mapping page table entries we rely on dma == phys */
        BUG_ON(handle != virt_to_phys(domain->pgtable));
+       if (dma_mapping_error(dma_dev, handle))
+               goto err_lv2ent;
 
        spin_lock_init(&domain->lock);
        spin_lock_init(&domain->pgtablelock);
@@ -754,6 +769,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
 
        return &domain->domain;
 
+err_lv2ent:
+       free_pages((unsigned long)domain->lv2entcnt, 1);
 err_counter:
        free_pages((unsigned long)domain->pgtable, 2);
 err_dma_cookie:
@@ -897,6 +914,7 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
        }
 
        if (lv1ent_fault(sent)) {
+               dma_addr_t handle;
                sysmmu_pte_t *pent;
                bool need_flush_flpd_cache = lv1ent_zero(sent);
 
@@ -908,7 +926,12 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
                update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
                kmemleak_ignore(pent);
                *pgcounter = NUM_LV2ENTRIES;
-               dma_map_single(dma_dev, pent, LV2TABLE_SIZE, DMA_TO_DEVICE);
+               handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
+                                       DMA_TO_DEVICE);
+               if (dma_mapping_error(dma_dev, handle)) {
+                       kmem_cache_free(lv2table_kmem_cache, pent);
+                       return ERR_PTR(-EADDRINUSE);
+               }
 
                /*
                 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
@@ -1231,9 +1254,21 @@ static int exynos_iommu_add_device(struct device *dev)
 
 static void exynos_iommu_remove_device(struct device *dev)
 {
+       struct exynos_iommu_owner *owner = dev->archdata.iommu;
+
        if (!has_sysmmu(dev))
                return;
 
+       if (owner->domain) {
+               struct iommu_group *group = iommu_group_get(dev);
+
+               if (group) {
+                       WARN_ON(owner->domain !=
+                               iommu_group_default_domain(group));
+                       exynos_iommu_detach_device(owner->domain, dev);
+                       iommu_group_put(group);
+               }
+       }
        iommu_group_remove_device(dev);
 }
 
@@ -1242,7 +1277,7 @@ static int exynos_iommu_of_xlate(struct device *dev,
 {
        struct exynos_iommu_owner *owner = dev->archdata.iommu;
        struct platform_device *sysmmu = of_find_device_by_node(spec->np);
-       struct sysmmu_drvdata *data;
+       struct sysmmu_drvdata *data, *entry;
 
        if (!sysmmu)
                return -ENODEV;
@@ -1261,6 +1296,10 @@ static int exynos_iommu_of_xlate(struct device *dev,
                dev->archdata.iommu = owner;
        }
 
+       list_for_each_entry(entry, &owner->controllers, owner_node)
+               if (entry == data)
+                       return 0;
+
        list_add_tail(&data->owner_node, &owner->controllers);
        data->master = dev;
 
index 8a185250ae5a5923d8ab9f34d811caa0f5e09b79..f5e02f8e737113123991607219ad23a12b2d1c54 100644 (file)
@@ -440,6 +440,7 @@ struct dmar_rmrr_unit {
        u64     end_address;            /* reserved end address */
        struct dmar_dev_scope *devices; /* target devices */
        int     devices_cnt;            /* target device count */
+       struct iommu_resv_region *resv; /* reserved region handle */
 };
 
 struct dmar_atsr_unit {
@@ -547,7 +548,7 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
 static DEFINE_SPINLOCK(device_domain_lock);
 static LIST_HEAD(device_domain_list);
 
-static const struct iommu_ops intel_iommu_ops;
+const struct iommu_ops intel_iommu_ops;
 
 static bool translation_pre_enabled(struct intel_iommu *iommu)
 {
@@ -1144,7 +1145,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
                if (!dma_pte_present(pte) || dma_pte_superpage(pte))
                        goto next;
 
-               level_pfn = pfn & level_mask(level - 1);
+               level_pfn = pfn & level_mask(level);
                level_pte = phys_to_virt(dma_pte_addr(pte));
 
                if (level > 2)
@@ -3325,13 +3326,14 @@ static int __init init_dmars(void)
        iommu_identity_mapping |= IDENTMAP_GFX;
 #endif
 
+       check_tylersburg_isoch();
+
        if (iommu_identity_mapping) {
                ret = si_domain_init(hw_pass_through);
                if (ret)
                        goto free_iommu;
        }
 
-       check_tylersburg_isoch();
 
        /*
         * If we copied translations from a previous kernel in the kdump
@@ -4246,27 +4248,40 @@ static inline void init_iommu_pm_ops(void) {}
 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
 {
        struct acpi_dmar_reserved_memory *rmrr;
+       int prot = DMA_PTE_READ|DMA_PTE_WRITE;
        struct dmar_rmrr_unit *rmrru;
+       size_t length;
 
        rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
        if (!rmrru)
-               return -ENOMEM;
+               goto out;
 
        rmrru->hdr = header;
        rmrr = (struct acpi_dmar_reserved_memory *)header;
        rmrru->base_address = rmrr->base_address;
        rmrru->end_address = rmrr->end_address;
+
+       length = rmrr->end_address - rmrr->base_address + 1;
+       rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
+                                             IOMMU_RESV_DIRECT);
+       if (!rmrru->resv)
+               goto free_rmrru;
+
        rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
                                ((void *)rmrr) + rmrr->header.length,
                                &rmrru->devices_cnt);
-       if (rmrru->devices_cnt && rmrru->devices == NULL) {
-               kfree(rmrru);
-               return -ENOMEM;
-       }
+       if (rmrru->devices_cnt && rmrru->devices == NULL)
+               goto free_all;
 
        list_add(&rmrru->list, &dmar_rmrr_units);
 
        return 0;
+free_all:
+       kfree(rmrru->resv);
+free_rmrru:
+       kfree(rmrru);
+out:
+       return -ENOMEM;
 }
 
 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
@@ -4480,6 +4495,7 @@ static void intel_iommu_free_dmars(void)
        list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
                list_del(&rmrru->list);
                dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
+               kfree(rmrru->resv);
                kfree(rmrru);
        }
 
@@ -4853,10 +4869,13 @@ int __init intel_iommu_init(void)
 
        init_iommu_pm_ops();
 
-       for_each_active_iommu(iommu, drhd)
-               iommu->iommu_dev = iommu_device_create(NULL, iommu,
-                                                      intel_iommu_groups,
-                                                      "%s", iommu->name);
+       for_each_active_iommu(iommu, drhd) {
+               iommu_device_sysfs_add(&iommu->iommu, NULL,
+                                      intel_iommu_groups,
+                                      "%s", iommu->name);
+               iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
+               iommu_device_register(&iommu->iommu);
+       }
 
        bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
        bus_register_notifier(&pci_bus_type, &device_nb);
@@ -5178,7 +5197,7 @@ static int intel_iommu_add_device(struct device *dev)
        if (!iommu)
                return -ENODEV;
 
-       iommu_device_link(iommu->iommu_dev, dev);
+       iommu_device_link(&iommu->iommu, dev);
 
        group = iommu_group_get_for_dev(dev);
 
@@ -5200,7 +5219,46 @@ static void intel_iommu_remove_device(struct device *dev)
 
        iommu_group_remove_device(dev);
 
-       iommu_device_unlink(iommu->iommu_dev, dev);
+       iommu_device_unlink(&iommu->iommu, dev);
+}
+
+static void intel_iommu_get_resv_regions(struct device *device,
+                                        struct list_head *head)
+{
+       struct iommu_resv_region *reg;
+       struct dmar_rmrr_unit *rmrr;
+       struct device *i_dev;
+       int i;
+
+       rcu_read_lock();
+       for_each_rmrr_units(rmrr) {
+               for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
+                                         i, i_dev) {
+                       if (i_dev != device)
+                               continue;
+
+                       list_add_tail(&rmrr->resv->list, head);
+               }
+       }
+       rcu_read_unlock();
+
+       reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
+                                     IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
+                                     0, IOMMU_RESV_RESERVED);
+       if (!reg)
+               return;
+       list_add_tail(&reg->list, head);
+}
+
+static void intel_iommu_put_resv_regions(struct device *dev,
+                                        struct list_head *head)
+{
+       struct iommu_resv_region *entry, *next;
+
+       list_for_each_entry_safe(entry, next, head, list) {
+               if (entry->type == IOMMU_RESV_RESERVED)
+                       kfree(entry);
+       }
 }
 
 #ifdef CONFIG_INTEL_IOMMU_SVM
@@ -5332,20 +5390,22 @@ struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
 }
 #endif /* CONFIG_INTEL_IOMMU_SVM */
 
-static const struct iommu_ops intel_iommu_ops = {
-       .capable        = intel_iommu_capable,
-       .domain_alloc   = intel_iommu_domain_alloc,
-       .domain_free    = intel_iommu_domain_free,
-       .attach_dev     = intel_iommu_attach_device,
-       .detach_dev     = intel_iommu_detach_device,
-       .map            = intel_iommu_map,
-       .unmap          = intel_iommu_unmap,
-       .map_sg         = default_iommu_map_sg,
-       .iova_to_phys   = intel_iommu_iova_to_phys,
-       .add_device     = intel_iommu_add_device,
-       .remove_device  = intel_iommu_remove_device,
-       .device_group   = pci_device_group,
-       .pgsize_bitmap  = INTEL_IOMMU_PGSIZES,
+const struct iommu_ops intel_iommu_ops = {
+       .capable                = intel_iommu_capable,
+       .domain_alloc           = intel_iommu_domain_alloc,
+       .domain_free            = intel_iommu_domain_free,
+       .attach_dev             = intel_iommu_attach_device,
+       .detach_dev             = intel_iommu_detach_device,
+       .map                    = intel_iommu_map,
+       .unmap                  = intel_iommu_unmap,
+       .map_sg                 = default_iommu_map_sg,
+       .iova_to_phys           = intel_iommu_iova_to_phys,
+       .add_device             = intel_iommu_add_device,
+       .remove_device          = intel_iommu_remove_device,
+       .get_resv_regions       = intel_iommu_get_resv_regions,
+       .put_resv_regions       = intel_iommu_put_resv_regions,
+       .device_group           = pci_device_group,
+       .pgsize_bitmap          = INTEL_IOMMU_PGSIZES,
 };
 
 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
index 0769276c0537c8157f3845dcb4d577998ebb5d77..1c049e2e12bf0ddacbc0e8ff9cbb09751996a549 100644 (file)
@@ -265,7 +265,9 @@ static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl,
        if (!(prot & IOMMU_MMIO))
                pte |= ARM_V7S_ATTR_TEX(1);
        if (ap) {
-               pte |= ARM_V7S_PTE_AF | ARM_V7S_PTE_AP_UNPRIV;
+               pte |= ARM_V7S_PTE_AF;
+               if (!(prot & IOMMU_PRIV))
+                       pte |= ARM_V7S_PTE_AP_UNPRIV;
                if (!(prot & IOMMU_WRITE))
                        pte |= ARM_V7S_PTE_AP_RDONLY;
        }
@@ -288,6 +290,8 @@ static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl)
 
        if (!(attr & ARM_V7S_PTE_AP_RDONLY))
                prot |= IOMMU_WRITE;
+       if (!(attr & ARM_V7S_PTE_AP_UNPRIV))
+               prot |= IOMMU_PRIV;
        if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0)
                prot |= IOMMU_MMIO;
        else if (pte & ARM_V7S_ATTR_C)
index a40ce3406fef19e8fb446fe9f089a6d5fb865178..feacc54bec683b535fcba37e47ecb46af014ef5a 100644 (file)
@@ -350,11 +350,14 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
 
        if (data->iop.fmt == ARM_64_LPAE_S1 ||
            data->iop.fmt == ARM_32_LPAE_S1) {
-               pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;
+               pte = ARM_LPAE_PTE_nG;
 
                if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
                        pte |= ARM_LPAE_PTE_AP_RDONLY;
 
+               if (!(prot & IOMMU_PRIV))
+                       pte |= ARM_LPAE_PTE_AP_UNPRIV;
+
                if (prot & IOMMU_MMIO)
                        pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
                                << ARM_LPAE_PTE_ATTRINDX_SHIFT);
index 39b2d9127dbf80f49a8432610da217c9dabb042e..c58351ed61c14309c7a72346bd56f659a7039637 100644 (file)
@@ -50,85 +50,76 @@ static int __init iommu_dev_init(void)
 postcore_initcall(iommu_dev_init);
 
 /*
- * Create an IOMMU device and return a pointer to it.  IOMMU specific
- * attributes can be provided as an attribute group, allowing a unique
- * namespace per IOMMU type.
+ * Init the struct device for the IOMMU. IOMMU specific attributes can
+ * be provided as an attribute group, allowing a unique namespace per
+ * IOMMU type.
  */
-struct device *iommu_device_create(struct device *parent, void *drvdata,
-                                  const struct attribute_group **groups,
-                                  const char *fmt, ...)
+int iommu_device_sysfs_add(struct iommu_device *iommu,
+                          struct device *parent,
+                          const struct attribute_group **groups,
+                          const char *fmt, ...)
 {
-       struct device *dev;
        va_list vargs;
        int ret;
 
-       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-       if (!dev)
-               return ERR_PTR(-ENOMEM);
+       device_initialize(&iommu->dev);
 
-       device_initialize(dev);
-
-       dev->class = &iommu_class;
-       dev->parent = parent;
-       dev->groups = groups;
-       dev_set_drvdata(dev, drvdata);
+       iommu->dev.class = &iommu_class;
+       iommu->dev.parent = parent;
+       iommu->dev.groups = groups;
 
        va_start(vargs, fmt);
-       ret = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
+       ret = kobject_set_name_vargs(&iommu->dev.kobj, fmt, vargs);
        va_end(vargs);
        if (ret)
                goto error;
 
-       ret = device_add(dev);
+       ret = device_add(&iommu->dev);
        if (ret)
                goto error;
 
-       return dev;
+       return 0;
 
 error:
-       put_device(dev);
-       return ERR_PTR(ret);
+       put_device(&iommu->dev);
+       return ret;
 }
 
-void iommu_device_destroy(struct device *dev)
+void iommu_device_sysfs_remove(struct iommu_device *iommu)
 {
-       if (!dev || IS_ERR(dev))
-               return;
-
-       device_unregister(dev);
+       device_unregister(&iommu->dev);
 }
-
 /*
  * IOMMU drivers can indicate a device is managed by a given IOMMU using
  * this interface.  A link to the device will be created in the "devices"
  * directory of the IOMMU device in sysfs and an "iommu" link will be
  * created under the linked device, pointing back at the IOMMU device.
  */
-int iommu_device_link(struct device *dev, struct device *link)
+int iommu_device_link(struct iommu_device *iommu, struct device *link)
 {
        int ret;
 
-       if (!dev || IS_ERR(dev))
+       if (!iommu || IS_ERR(iommu))
                return -ENODEV;
 
-       ret = sysfs_add_link_to_group(&dev->kobj, "devices",
+       ret = sysfs_add_link_to_group(&iommu->dev.kobj, "devices",
                                      &link->kobj, dev_name(link));
        if (ret)
                return ret;
 
-       ret = sysfs_create_link_nowarn(&link->kobj, &dev->kobj, "iommu");
+       ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev.kobj, "iommu");
        if (ret)
-               sysfs_remove_link_from_group(&dev->kobj, "devices",
+               sysfs_remove_link_from_group(&iommu->dev.kobj, "devices",
                                             dev_name(link));
 
        return ret;
 }
 
-void iommu_device_unlink(struct device *dev, struct device *link)
+void iommu_device_unlink(struct iommu_device *iommu, struct device *link)
 {
-       if (!dev || IS_ERR(dev))
+       if (!iommu || IS_ERR(iommu))
                return;
 
        sysfs_remove_link(&link->kobj, "iommu");
-       sysfs_remove_link_from_group(&dev->kobj, "devices", dev_name(link));
+       sysfs_remove_link_from_group(&iommu->dev.kobj, "devices", dev_name(link));
 }
index dbe7f653bb7cb6d2db0b9ecf9d8586dc719cb808..8ea14f41a979fd4e72e3a6093e5fa8d2a0eff24a 100644 (file)
@@ -55,7 +55,7 @@ struct iommu_group {
        struct iommu_domain *domain;
 };
 
-struct iommu_device {
+struct group_device {
        struct list_head list;
        struct device *dev;
        char *name;
@@ -68,6 +68,12 @@ struct iommu_group_attribute {
                         const char *buf, size_t count);
 };
 
+static const char * const iommu_group_resv_type_string[] = {
+       [IOMMU_RESV_DIRECT]     = "direct",
+       [IOMMU_RESV_RESERVED]   = "reserved",
+       [IOMMU_RESV_MSI]        = "msi",
+};
+
 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)          \
 struct iommu_group_attribute iommu_group_attr_##_name =                \
        __ATTR(_name, _mode, _show, _store)
@@ -77,6 +83,25 @@ struct iommu_group_attribute iommu_group_attr_##_name =              \
 #define to_iommu_group(_kobj)          \
        container_of(_kobj, struct iommu_group, kobj)
 
+static LIST_HEAD(iommu_device_list);
+static DEFINE_SPINLOCK(iommu_device_lock);
+
+int iommu_device_register(struct iommu_device *iommu)
+{
+       spin_lock(&iommu_device_lock);
+       list_add_tail(&iommu->list, &iommu_device_list);
+       spin_unlock(&iommu_device_lock);
+
+       return 0;
+}
+
+void iommu_device_unregister(struct iommu_device *iommu)
+{
+       spin_lock(&iommu_device_lock);
+       list_del(&iommu->list);
+       spin_unlock(&iommu_device_lock);
+}
+
 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
                                                 unsigned type);
 static int __iommu_attach_device(struct iommu_domain *domain,
@@ -133,8 +158,131 @@ static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
        return sprintf(buf, "%s\n", group->name);
 }
 
+/**
+ * iommu_insert_resv_region - Insert a new region in the
+ * list of reserved regions.
+ * @new: new region to insert
+ * @regions: list of regions
+ *
+ * The new element is sorted by address with respect to the other
+ * regions of the same type. In case it overlaps with another
+ * region of the same type, regions are merged. In case it
+ * overlaps with another region of different type, regions are
+ * not merged.
+ *
+ * A copy of @new is allocated for the list; @new itself is not linked.
+ *
+ * Return: 0 on success, -ENOMEM if the region copy cannot be allocated.
+ */
+static int iommu_insert_resv_region(struct iommu_resv_region *new,
+                                   struct list_head *regions)
+{
+       struct iommu_resv_region *region;
+       phys_addr_t start = new->start;
+       phys_addr_t end = new->start + new->length - 1;
+       struct list_head *pos = regions->next;
+
+       while (pos != regions) {
+               struct iommu_resv_region *entry =
+                       list_entry(pos, struct iommu_resv_region, list);
+               phys_addr_t a = entry->start;
+               phys_addr_t b = entry->start + entry->length - 1;
+               int type = entry->type;
+
+               if (end < a) {
+                       /* new region ends before @entry: insert in front of it */
+                       goto insert;
+               } else if (start > b) {
+                       /* new region starts after @entry: keep walking */
+                       pos = pos->next;
+               } else if ((start >= a) && (end <= b)) {
+                       /* new region fully contained in @entry */
+                       if (new->type == type)
+                               goto done;      /* same type: nothing to add */
+                       else
+                               pos = pos->next;        /* different type: keep both */
+               } else {
+                       /* partial overlap with @entry */
+                       if (new->type == type) {
+                               /* grow @entry to the union and re-insert it sorted */
+                               phys_addr_t new_start = min(a, start);
+                               phys_addr_t new_end = max(b, end);
+
+                               list_del(&entry->list);
+                               entry->start = new_start;
+                               entry->length = new_end - new_start + 1;
+                               iommu_insert_resv_region(entry, regions);
+                               /*
+                                * NOTE(review): pos still points at the node we just
+                                * list_del()'d and re-inserted; the next iteration
+                                * re-reads it and should exit via the contained-
+                                * same-type case above — confirm this is intended.
+                                */
+                       } else {
+                               pos = pos->next;
+                       }
+               }
+       }
+insert:
+       region = iommu_alloc_resv_region(new->start, new->length,
+                                        new->prot, new->type);
+       if (!region)
+               return -ENOMEM;
+
+       list_add_tail(&region->list, pos);
+done:
+       return 0;
+}
+
+/*
+ * Merge every region on @dev_resv_regions into the sorted/merged
+ * @group_resv_regions list, stopping at the first insertion failure.
+ *
+ * Return: 0 on success or the error from iommu_insert_resv_region().
+ */
+static int
+iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
+                                struct list_head *group_resv_regions)
+{
+       struct iommu_resv_region *entry;
+       int ret = 0;
+
+       list_for_each_entry(entry, dev_resv_regions, list) {
+               ret = iommu_insert_resv_region(entry, group_resv_regions);
+               if (ret)
+                       break;
+       }
+       return ret;
+}
+
+/**
+ * iommu_get_group_resv_regions - collect the reserved regions of all
+ * devices in a group into one sorted, merged list
+ * @group: the iommu group
+ * @head: list head the region copies are added to
+ *
+ * Entries added to @head are allocated copies; the caller is
+ * responsible for freeing them (see iommu_group_show_resv_regions()).
+ *
+ * Return: 0 on success or a negative errno from region insertion.
+ */
+int iommu_get_group_resv_regions(struct iommu_group *group,
+                                struct list_head *head)
+{
+       struct group_device *device;
+       int ret = 0;
+
+       mutex_lock(&group->mutex);
+       list_for_each_entry(device, &group->devices, list) {
+               struct list_head dev_resv_regions;
+
+               /* per-device regions are fetched, merged, then released */
+               INIT_LIST_HEAD(&dev_resv_regions);
+               iommu_get_resv_regions(device->dev, &dev_resv_regions);
+               ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
+               iommu_put_resv_regions(device->dev, &dev_resv_regions);
+               if (ret)
+                       break;
+       }
+       mutex_unlock(&group->mutex);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
+
+/*
+ * sysfs show handler for the group's "reserved_regions" attribute.
+ * Prints one "start end type" line per merged region and frees each
+ * region copy as it goes.
+ *
+ * NOTE(review): sprintf() is unbounded here — this relies on the
+ * output fitting in the sysfs PAGE_SIZE buffer; confirm the region
+ * count cannot overflow it.
+ */
+static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
+                                            char *buf)
+{
+       struct iommu_resv_region *region, *next;
+       struct list_head group_resv_regions;
+       char *str = buf;
+
+       INIT_LIST_HEAD(&group_resv_regions);
+       iommu_get_group_resv_regions(group, &group_resv_regions);
+
+       /* _safe variant because each entry is freed inside the loop */
+       list_for_each_entry_safe(region, next, &group_resv_regions, list) {
+               str += sprintf(str, "0x%016llx 0x%016llx %s\n",
+                              (long long int)region->start,
+                              (long long int)(region->start +
+                                               region->length - 1),
+                              iommu_group_resv_type_string[region->type]);
+               kfree(region);
+       }
+
+       return (str - buf);
+}
+
 static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
 
+static IOMMU_GROUP_ATTR(reserved_regions, 0444,
+                       iommu_group_show_resv_regions, NULL);
+
 static void iommu_group_release(struct kobject *kobj)
 {
        struct iommu_group *group = to_iommu_group(kobj);
@@ -212,6 +360,11 @@ struct iommu_group *iommu_group_alloc(void)
         */
        kobject_put(&group->kobj);
 
+       ret = iommu_group_create_file(group,
+                                     &iommu_group_attr_reserved_regions);
+       if (ret)
+               return ERR_PTR(ret);
+
        pr_debug("Allocated group %d\n", group->id);
 
        return group;
@@ -318,7 +471,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
                                              struct device *dev)
 {
        struct iommu_domain *domain = group->default_domain;
-       struct iommu_dm_region *entry;
+       struct iommu_resv_region *entry;
        struct list_head mappings;
        unsigned long pg_size;
        int ret = 0;
@@ -331,18 +484,21 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
        pg_size = 1UL << __ffs(domain->pgsize_bitmap);
        INIT_LIST_HEAD(&mappings);
 
-       iommu_get_dm_regions(dev, &mappings);
+       iommu_get_resv_regions(dev, &mappings);
 
        /* We need to consider overlapping regions for different devices */
        list_for_each_entry(entry, &mappings, list) {
                dma_addr_t start, end, addr;
 
-               if (domain->ops->apply_dm_region)
-                       domain->ops->apply_dm_region(dev, domain, entry);
+               if (domain->ops->apply_resv_region)
+                       domain->ops->apply_resv_region(dev, domain, entry);
 
                start = ALIGN(entry->start, pg_size);
                end   = ALIGN(entry->start + entry->length, pg_size);
 
+               if (entry->type != IOMMU_RESV_DIRECT)
+                       continue;
+
                for (addr = start; addr < end; addr += pg_size) {
                        phys_addr_t phys_addr;
 
@@ -358,7 +514,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
        }
 
 out:
-       iommu_put_dm_regions(dev, &mappings);
+       iommu_put_resv_regions(dev, &mappings);
 
        return ret;
 }
@@ -374,7 +530,7 @@ out:
 int iommu_group_add_device(struct iommu_group *group, struct device *dev)
 {
        int ret, i = 0;
-       struct iommu_device *device;
+       struct group_device *device;
 
        device = kzalloc(sizeof(*device), GFP_KERNEL);
        if (!device)
@@ -383,36 +539,30 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev)
        device->dev = dev;
 
        ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
-       if (ret) {
-               kfree(device);
-               return ret;
-       }
+       if (ret)
+               goto err_free_device;
 
        device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
 rename:
        if (!device->name) {
-               sysfs_remove_link(&dev->kobj, "iommu_group");
-               kfree(device);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto err_remove_link;
        }
 
        ret = sysfs_create_link_nowarn(group->devices_kobj,
                                       &dev->kobj, device->name);
        if (ret) {
-               kfree(device->name);
                if (ret == -EEXIST && i >= 0) {
                        /*
                         * Account for the slim chance of collision
                         * and append an instance to the name.
                         */
+                       kfree(device->name);
                        device->name = kasprintf(GFP_KERNEL, "%s.%d",
                                                 kobject_name(&dev->kobj), i++);
                        goto rename;
                }
-
-               sysfs_remove_link(&dev->kobj, "iommu_group");
-               kfree(device);
-               return ret;
+               goto err_free_name;
        }
 
        kobject_get(group->devices_kobj);
@@ -424,8 +574,10 @@ rename:
        mutex_lock(&group->mutex);
        list_add_tail(&device->list, &group->devices);
        if (group->domain)
-               __iommu_attach_device(group->domain, dev);
+               ret = __iommu_attach_device(group->domain, dev);
        mutex_unlock(&group->mutex);
+       if (ret)
+               goto err_put_group;
 
        /* Notify any listeners about change to group. */
        blocking_notifier_call_chain(&group->notifier,
@@ -436,6 +588,21 @@ rename:
        pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);
 
        return 0;
+
+err_put_group:
+       mutex_lock(&group->mutex);
+       list_del(&device->list);
+       mutex_unlock(&group->mutex);
+       dev->iommu_group = NULL;
+       kobject_put(group->devices_kobj);
+err_free_name:
+       kfree(device->name);
+err_remove_link:
+       sysfs_remove_link(&dev->kobj, "iommu_group");
+err_free_device:
+       kfree(device);
+       pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_group_add_device);
 
@@ -449,7 +616,7 @@ EXPORT_SYMBOL_GPL(iommu_group_add_device);
 void iommu_group_remove_device(struct device *dev)
 {
        struct iommu_group *group = dev->iommu_group;
-       struct iommu_device *tmp_device, *device = NULL;
+       struct group_device *tmp_device, *device = NULL;
 
        pr_info("Removing device %s from group %d\n", dev_name(dev), group->id);
 
@@ -484,7 +651,7 @@ EXPORT_SYMBOL_GPL(iommu_group_remove_device);
 
 static int iommu_group_device_count(struct iommu_group *group)
 {
-       struct iommu_device *entry;
+       struct group_device *entry;
        int ret = 0;
 
        list_for_each_entry(entry, &group->devices, list)
@@ -507,7 +674,7 @@ static int iommu_group_device_count(struct iommu_group *group)
 static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
                                      int (*fn)(struct device *, void *))
 {
-       struct iommu_device *device;
+       struct group_device *device;
        int ret = 0;
 
        list_for_each_entry(device, &group->devices, list) {
@@ -1559,20 +1726,38 @@ int iommu_domain_set_attr(struct iommu_domain *domain,
 }
 EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
 
-void iommu_get_dm_regions(struct device *dev, struct list_head *list)
+void iommu_get_resv_regions(struct device *dev, struct list_head *list)
 {
        const struct iommu_ops *ops = dev->bus->iommu_ops;
 
-       if (ops && ops->get_dm_regions)
-               ops->get_dm_regions(dev, list);
+       if (ops && ops->get_resv_regions)
+               ops->get_resv_regions(dev, list);
 }
 
-void iommu_put_dm_regions(struct device *dev, struct list_head *list)
+void iommu_put_resv_regions(struct device *dev, struct list_head *list)
 {
        const struct iommu_ops *ops = dev->bus->iommu_ops;
 
-       if (ops && ops->put_dm_regions)
-               ops->put_dm_regions(dev, list);
+       if (ops && ops->put_resv_regions)
+               ops->put_resv_regions(dev, list);
+}
+
+/**
+ * iommu_alloc_resv_region - allocate and initialise a reserved region
+ * @start: base physical address of the region
+ * @length: size of the region in bytes
+ * @prot: IOMMU mapping protection flags for the region
+ * @type: region type (IOMMU_RESV_*)
+ *
+ * Return: the new region, or NULL on allocation failure. The caller
+ * owns the returned memory and must kfree() it.
+ */
+struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
+                                                 size_t length,
+                                                 int prot, int type)
+{
+       struct iommu_resv_region *region;
+
+       region = kzalloc(sizeof(*region), GFP_KERNEL);
+       if (!region)
+               return NULL;
+
+       INIT_LIST_HEAD(&region->list);
+       region->start = start;
+       region->length = length;
+       region->prot = prot;
+       region->type = type;
+       return region;
 }
 
 /* Request that a device is direct mapped by the IOMMU */
@@ -1628,43 +1813,18 @@ out:
        return ret;
 }
 
-struct iommu_instance {
-       struct list_head list;
-       struct fwnode_handle *fwnode;
-       const struct iommu_ops *ops;
-};
-static LIST_HEAD(iommu_instance_list);
-static DEFINE_SPINLOCK(iommu_instance_lock);
-
-void iommu_register_instance(struct fwnode_handle *fwnode,
-                            const struct iommu_ops *ops)
+const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
 {
-       struct iommu_instance *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
-
-       if (WARN_ON(!iommu))
-               return;
-
-       of_node_get(to_of_node(fwnode));
-       INIT_LIST_HEAD(&iommu->list);
-       iommu->fwnode = fwnode;
-       iommu->ops = ops;
-       spin_lock(&iommu_instance_lock);
-       list_add_tail(&iommu->list, &iommu_instance_list);
-       spin_unlock(&iommu_instance_lock);
-}
-
-const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode)
-{
-       struct iommu_instance *instance;
        const struct iommu_ops *ops = NULL;
+       struct iommu_device *iommu;
 
-       spin_lock(&iommu_instance_lock);
-       list_for_each_entry(instance, &iommu_instance_list, list)
-               if (instance->fwnode == fwnode) {
-                       ops = instance->ops;
+       spin_lock(&iommu_device_lock);
+       list_for_each_entry(iommu, &iommu_device_list, list)
+               if (iommu->fwnode == fwnode) {
+                       ops = iommu->ops;
                        break;
                }
-       spin_unlock(&iommu_instance_lock);
+       spin_unlock(&iommu_device_lock);
        return ops;
 }
 
@@ -1714,13 +1874,14 @@ int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
                fwspec = krealloc(dev->iommu_fwspec, size, GFP_KERNEL);
                if (!fwspec)
                        return -ENOMEM;
+
+               dev->iommu_fwspec = fwspec;
        }
 
        for (i = 0; i < num_ids; i++)
                fwspec->ids[fwspec->num_ids + i] = ids[i];
 
        fwspec->num_ids += num_ids;
-       dev->iommu_fwspec = fwspec;
        return 0;
 }
 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
index 080beca0197dcec22802dfc7fc0aebbbfec3c023..b7268a14184f220bef244f38bfcc2d13900c443a 100644 (file)
@@ -62,7 +62,7 @@ __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
        else {
                struct rb_node *prev_node = rb_prev(iovad->cached32_node);
                struct iova *curr_iova =
-                       container_of(iovad->cached32_node, struct iova, node);
+                       rb_entry(iovad->cached32_node, struct iova, node);
                *limit_pfn = curr_iova->pfn_lo - 1;
                return prev_node;
        }
@@ -86,11 +86,11 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
        if (!iovad->cached32_node)
                return;
        curr = iovad->cached32_node;
-       cached_iova = container_of(curr, struct iova, node);
+       cached_iova = rb_entry(curr, struct iova, node);
 
        if (free->pfn_lo >= cached_iova->pfn_lo) {
                struct rb_node *node = rb_next(&free->node);
-               struct iova *iova = container_of(node, struct iova, node);
+               struct iova *iova = rb_entry(node, struct iova, node);
 
                /* only cache if it's below 32bit pfn */
                if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
@@ -125,7 +125,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
        curr = __get_cached_rbnode(iovad, &limit_pfn);
        prev = curr;
        while (curr) {
-               struct iova *curr_iova = container_of(curr, struct iova, node);
+               struct iova *curr_iova = rb_entry(curr, struct iova, node);
 
                if (limit_pfn < curr_iova->pfn_lo)
                        goto move_left;
@@ -171,8 +171,7 @@ move_left:
 
                /* Figure out where to put new node */
                while (*entry) {
-                       struct iova *this = container_of(*entry,
-                                                       struct iova, node);
+                       struct iova *this = rb_entry(*entry, struct iova, node);
                        parent = *entry;
 
                        if (new->pfn_lo < this->pfn_lo)
@@ -201,7 +200,7 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
        struct rb_node **new = &(root->rb_node), *parent = NULL;
        /* Figure out where to put new node */
        while (*new) {
-               struct iova *this = container_of(*new, struct iova, node);
+               struct iova *this = rb_entry(*new, struct iova, node);
 
                parent = *new;
 
@@ -311,7 +310,7 @@ private_find_iova(struct iova_domain *iovad, unsigned long pfn)
        assert_spin_locked(&iovad->iova_rbtree_lock);
 
        while (node) {
-               struct iova *iova = container_of(node, struct iova, node);
+               struct iova *iova = rb_entry(node, struct iova, node);
 
                /* If pfn falls within iova's range, return iova */
                if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
@@ -463,7 +462,7 @@ void put_iova_domain(struct iova_domain *iovad)
        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        node = rb_first(&iovad->rbroot);
        while (node) {
-               struct iova *iova = container_of(node, struct iova, node);
+               struct iova *iova = rb_entry(node, struct iova, node);
 
                rb_erase(node, &iovad->rbroot);
                free_iova_mem(iova);
@@ -477,7 +476,7 @@ static int
 __is_range_overlap(struct rb_node *node,
        unsigned long pfn_lo, unsigned long pfn_hi)
 {
-       struct iova *iova = container_of(node, struct iova, node);
+       struct iova *iova = rb_entry(node, struct iova, node);
 
        if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
                return 1;
@@ -541,7 +540,7 @@ reserve_iova(struct iova_domain *iovad,
        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
                if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
-                       iova = container_of(node, struct iova, node);
+                       iova = rb_entry(node, struct iova, node);
                        __adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
                        if ((pfn_lo >= iova->pfn_lo) &&
                                (pfn_hi <= iova->pfn_hi))
@@ -578,7 +577,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
 
        spin_lock_irqsave(&from->iova_rbtree_lock, flags);
        for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
-               struct iova *iova = container_of(node, struct iova, node);
+               struct iova *iova = rb_entry(node, struct iova, node);
                struct iova *new_iova;
 
                new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
index ace331da6459473685016aa8ac53601fe9c8ca84..b7e14ee863f92446997a66fc4b7532f1b8d93355 100644 (file)
@@ -313,6 +313,8 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
        domain->cfg.ias = 32;
        domain->cfg.oas = 40;
        domain->cfg.tlb = &ipmmu_gather_ops;
+       domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
+       domain->io_domain.geometry.force_aperture = true;
        /*
         * TODO: Add support for coherent walk through CCI with DVM and remove
         * cache handling. For now, delegate it to the io-pgtable code.
index b09692bb5b0a209c321401253a853c652ed374e2..d0448353d5011dae8376f43569e169c8051001db 100644 (file)
@@ -371,6 +371,58 @@ static int msm_iommu_domain_config(struct msm_priv *priv)
        return 0;
 }
 
+/* Must be called under msm_iommu_lock.
+ *
+ * Walk the registered MSM IOMMUs and return the one whose first
+ * context master matches @dev's of_node, or NULL if none does.
+ *
+ * NOTE(review): only the first entry of ctx_list is compared, and
+ * list_first_entry() assumes ctx_list is non-empty — confirm both
+ * invariants hold for every registered iommu.
+ */
+static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
+{
+       struct msm_iommu_dev *iommu, *ret = NULL;
+       struct msm_iommu_ctx_dev *master;
+
+       list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
+               master = list_first_entry(&iommu->ctx_list,
+                                         struct msm_iommu_ctx_dev,
+                                         list);
+               if (master->of_node == dev->of_node) {
+                       ret = iommu;
+                       break;
+               }
+       }
+
+       return ret;
+}
+
+/*
+ * iommu_ops .add_device hook: create the sysfs link between @dev and
+ * its MSM IOMMU instance. Returns -ENODEV when no IOMMU serves @dev.
+ *
+ * NOTE(review): iommu_device_link() does sysfs work while
+ * msm_iommu_lock is held with interrupts off — verify it cannot sleep
+ * in this context.
+ */
+static int msm_iommu_add_device(struct device *dev)
+{
+       struct msm_iommu_dev *iommu;
+       unsigned long flags;
+       int ret = 0;
+
+       spin_lock_irqsave(&msm_iommu_lock, flags);
+
+       iommu = find_iommu_for_dev(dev);
+       if (iommu)
+               iommu_device_link(&iommu->iommu, dev);
+       else
+               ret = -ENODEV;
+
+       spin_unlock_irqrestore(&msm_iommu_lock, flags);
+
+       return ret;
+}
+
+/*
+ * iommu_ops .remove_device hook: undo msm_iommu_add_device()'s sysfs
+ * link, if an IOMMU instance is still found for @dev.
+ */
+static void msm_iommu_remove_device(struct device *dev)
+{
+       struct msm_iommu_dev *iommu;
+       unsigned long flags;
+
+       spin_lock_irqsave(&msm_iommu_lock, flags);
+
+       iommu = find_iommu_for_dev(dev);
+       if (iommu)
+               iommu_device_unlink(&iommu->iommu, dev);
+
+       spin_unlock_irqrestore(&msm_iommu_lock, flags);
+}
+
 static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
        int ret = 0;
@@ -646,6 +698,8 @@ static struct iommu_ops msm_iommu_ops = {
        .unmap = msm_iommu_unmap,
        .map_sg = default_iommu_map_sg,
        .iova_to_phys = msm_iommu_iova_to_phys,
+       .add_device = msm_iommu_add_device,
+       .remove_device = msm_iommu_remove_device,
        .pgsize_bitmap = MSM_IOMMU_PGSIZES,
        .of_xlate = qcom_iommu_of_xlate,
 };
@@ -653,6 +707,7 @@ static struct iommu_ops msm_iommu_ops = {
 static int msm_iommu_probe(struct platform_device *pdev)
 {
        struct resource *r;
+       resource_size_t ioaddr;
        struct msm_iommu_dev *iommu;
        int ret, par, val;
 
@@ -696,6 +751,7 @@ static int msm_iommu_probe(struct platform_device *pdev)
                ret = PTR_ERR(iommu->base);
                goto fail;
        }
+       ioaddr = r->start;
 
        iommu->irq = platform_get_irq(pdev, 0);
        if (iommu->irq < 0) {
@@ -737,7 +793,22 @@ static int msm_iommu_probe(struct platform_device *pdev)
        }
 
        list_add(&iommu->dev_node, &qcom_iommu_devices);
-       of_iommu_set_ops(pdev->dev.of_node, &msm_iommu_ops);
+
+       ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
+                                    "msm-smmu.%pa", &ioaddr);
+       if (ret) {
+               pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
+               goto fail;
+       }
+
+       iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops);
+       iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);
+
+       ret = iommu_device_register(&iommu->iommu);
+       if (ret) {
+               pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
+               goto fail;
+       }
 
        pr_info("device mapped at %p, irq %d with %d ctx banks\n",
                iommu->base, iommu->irq, iommu->ncb);
index 4ca25d50d679687d81aaac8b0913e3e1a18f6768..ae92d2779c420ad46aaee5afe3a13c3041f78b51 100644 (file)
@@ -19,6 +19,7 @@
 #define MSM_IOMMU_H
 
 #include <linux/interrupt.h>
+#include <linux/iommu.h>
 #include <linux/clk.h>
 
 /* Sharability attributes of MSM IOMMU mappings */
@@ -68,6 +69,8 @@ struct msm_iommu_dev {
        struct list_head dom_node;
        struct list_head ctx_list;
        DECLARE_BITMAP(context_map, IOMMU_MAX_CBS);
+
+       struct iommu_device iommu;
 };
 
 /**
index 1479c76ece9ec31793ec4d0f78a5611d3f09a6cb..5d14cd15198db5cb6361d060abf208260c086ebe 100644 (file)
@@ -360,11 +360,15 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
 
 static int mtk_iommu_add_device(struct device *dev)
 {
+       struct mtk_iommu_data *data;
        struct iommu_group *group;
 
        if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
                return -ENODEV; /* Not a iommu client device */
 
+       data = dev->iommu_fwspec->iommu_priv;
+       iommu_device_link(&data->iommu, dev);
+
        group = iommu_group_get_for_dev(dev);
        if (IS_ERR(group))
                return PTR_ERR(group);
@@ -375,9 +379,14 @@ static int mtk_iommu_add_device(struct device *dev)
 
 static void mtk_iommu_remove_device(struct device *dev)
 {
+       struct mtk_iommu_data *data;
+
        if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
                return;
 
+       data = dev->iommu_fwspec->iommu_priv;
+       iommu_device_unlink(&data->iommu, dev);
+
        iommu_group_remove_device(dev);
        iommu_fwspec_free(dev);
 }
@@ -497,6 +506,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
        struct mtk_iommu_data   *data;
        struct device           *dev = &pdev->dev;
        struct resource         *res;
+       resource_size_t         ioaddr;
        struct component_match  *match = NULL;
        void                    *protect;
        int                     i, larb_nr, ret;
@@ -519,6 +529,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
        data->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(data->base))
                return PTR_ERR(data->base);
+       ioaddr = res->start;
 
        data->irq = platform_get_irq(pdev, 0);
        if (data->irq < 0)
@@ -567,6 +578,18 @@ static int mtk_iommu_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
+       ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
+                                    "mtk-iommu.%pa", &ioaddr);
+       if (ret)
+               return ret;
+
+       iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
+       iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);
+
+       ret = iommu_device_register(&data->iommu);
+       if (ret)
+               return ret;
+
        if (!iommu_present(&platform_bus_type))
                bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
 
@@ -577,6 +600,9 @@ static int mtk_iommu_remove(struct platform_device *pdev)
 {
        struct mtk_iommu_data *data = platform_get_drvdata(pdev);
 
+       iommu_device_sysfs_remove(&data->iommu);
+       iommu_device_unregister(&data->iommu);
+
        if (iommu_present(&platform_bus_type))
                bus_set_iommu(&platform_bus_type, NULL);
 
@@ -655,7 +681,6 @@ static int mtk_iommu_init_fn(struct device_node *np)
                return ret;
        }
 
-       of_iommu_set_ops(np, &mtk_iommu_ops);
        return 0;
 }
 
index 50177f738e4e012251795149241ecaff890dbbd7..2a28eadeea0ec3cf2ad363f7076f8555245520a2 100644 (file)
@@ -47,6 +47,8 @@ struct mtk_iommu_data {
        struct iommu_group              *m4u_group;
        struct mtk_smi_iommu            smi_imu;      /* SMI larb iommu info */
        bool                            enable_4GB;
+
+       struct iommu_device             iommu;
 };
 
 static inline int compare_of(struct device *dev, void *data)
index 0f57ddc4ecc274cbbe2be26618ec9fa90234ea9d..2683e9fc0dcf5cc0886034b7fccafb8861d958f1 100644 (file)
@@ -127,7 +127,7 @@ static const struct iommu_ops
                           "iommu-map-mask", &iommu_spec.np, iommu_spec.args))
                return NULL;
 
-       ops = of_iommu_get_ops(iommu_spec.np);
+       ops = iommu_ops_from_fwnode(&iommu_spec.np->fwnode);
        if (!ops || !ops->of_xlate ||
            iommu_fwspec_init(&pdev->dev, &iommu_spec.np->fwnode, ops) ||
            ops->of_xlate(&pdev->dev, &iommu_spec))
@@ -157,7 +157,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
                                           "#iommu-cells", idx,
                                           &iommu_spec)) {
                np = iommu_spec.np;
-               ops = of_iommu_get_ops(np);
+               ops = iommu_ops_from_fwnode(&np->fwnode);
 
                if (!ops || !ops->of_xlate ||
                    iommu_fwspec_init(dev, &np->fwnode, ops) ||
index ae96731cd2fbd4722d8c5f067592794b4c41bba1..125528f39e92c2377846f799a841e23ef1e98654 100644 (file)
@@ -283,3 +283,12 @@ config EZNPS_GIC
 config STM32_EXTI
        bool
        select IRQ_DOMAIN
+
+config QCOM_IRQ_COMBINER
+       bool "QCOM IRQ combiner support"
+       depends on ARCH_QCOM && ACPI
+       select IRQ_DOMAIN
+       select IRQ_DOMAIN_HIERARCHY
+       help
+         Say yes here to add support for the IRQ combiner devices embedded
+         in Qualcomm Technologies chips.
index 0e55d94065bf597c828aaa7d301132c7111d88f7..152bc40b6762a63cb50f4bbc084b019dcfebe556 100644 (file)
@@ -6,6 +6,7 @@ obj-$(CONFIG_ATH79)                     += irq-ath79-misc.o
 obj-$(CONFIG_ARCH_BCM2835)             += irq-bcm2835.o
 obj-$(CONFIG_ARCH_BCM2835)             += irq-bcm2836.o
 obj-$(CONFIG_ARCH_EXYNOS)              += exynos-combiner.o
+obj-$(CONFIG_ARCH_GEMINI)              += irq-gemini.o
 obj-$(CONFIG_ARCH_HIP04)               += irq-hip04.o
 obj-$(CONFIG_ARCH_LPC32XX)             += irq-lpc32xx.o
 obj-$(CONFIG_ARCH_MMP)                 += irq-mmp.o
@@ -75,3 +76,4 @@ obj-$(CONFIG_LS_SCFG_MSI)             += irq-ls-scfg-msi.o
 obj-$(CONFIG_EZNPS_GIC)                        += irq-eznps.o
 obj-$(CONFIG_ARCH_ASPEED)              += irq-aspeed-vic.o
 obj-$(CONFIG_STM32_EXTI)               += irq-stm32-exti.o
+obj-$(CONFIG_QCOM_IRQ_COMBINER)                += qcom-irq-combiner.o
diff --git a/drivers/irqchip/irq-gemini.c b/drivers/irqchip/irq-gemini.c
new file mode 100644 (file)
index 0000000..495224c
--- /dev/null
@@ -0,0 +1,185 @@
+/*
+ * irqchip for the Cortina Systems Gemini Copyright (C) 2017 Linus
+ * Walleij <linus.walleij@linaro.org>
+ *
+ * Based on arch/arm/mach-gemini/irq.c
+ * Copyright (C) 2001-2006 Storlink, Corp.
+ * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+ */
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/versatile-fpga.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/cpu.h>
+
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#define GEMINI_NUM_IRQS 32
+
+#define GEMINI_IRQ_SOURCE(base_addr)   (base_addr + 0x00)
+#define GEMINI_IRQ_MASK(base_addr)     (base_addr + 0x04)
+#define GEMINI_IRQ_CLEAR(base_addr)    (base_addr + 0x08)
+#define GEMINI_IRQ_MODE(base_addr)     (base_addr + 0x0C)
+#define GEMINI_IRQ_POLARITY(base_addr) (base_addr + 0x10)
+#define GEMINI_IRQ_STATUS(base_addr)   (base_addr + 0x14)
+#define GEMINI_FIQ_SOURCE(base_addr)   (base_addr + 0x20)
+#define GEMINI_FIQ_MASK(base_addr)     (base_addr + 0x24)
+#define GEMINI_FIQ_CLEAR(base_addr)    (base_addr + 0x28)
+#define GEMINI_FIQ_MODE(base_addr)     (base_addr + 0x2C)
+#define GEMINI_FIQ_POLARITY(base_addr) (base_addr + 0x30)
+#define GEMINI_FIQ_STATUS(base_addr)   (base_addr + 0x34)
+
+/**
+ * struct gemini_irq_data - irq data container for the Gemini IRQ controller
+ * @base: memory offset in virtual memory
+ * @chip: chip container for this instance
+ * @domain: IRQ domain for this instance
+ */
+struct gemini_irq_data {
+       void __iomem *base;
+       struct irq_chip chip;
+       struct irq_domain *domain;
+};
+
+static void gemini_irq_mask(struct irq_data *d)
+{
+       struct gemini_irq_data *g = irq_data_get_irq_chip_data(d);
+       unsigned int mask;
+
+       mask = readl(GEMINI_IRQ_MASK(g->base));
+       mask &= ~BIT(irqd_to_hwirq(d));
+       writel(mask, GEMINI_IRQ_MASK(g->base));
+}
+
+static void gemini_irq_unmask(struct irq_data *d)
+{
+       struct gemini_irq_data *g = irq_data_get_irq_chip_data(d);
+       unsigned int mask;
+
+       mask = readl(GEMINI_IRQ_MASK(g->base));
+       mask |= BIT(irqd_to_hwirq(d));
+       writel(mask, GEMINI_IRQ_MASK(g->base));
+}
+
+static void gemini_irq_ack(struct irq_data *d)
+{
+       struct gemini_irq_data *g = irq_data_get_irq_chip_data(d);
+
+       writel(BIT(irqd_to_hwirq(d)), GEMINI_IRQ_CLEAR(g->base));
+}
+
+static int gemini_irq_set_type(struct irq_data *d, unsigned int trigger)
+{
+       struct gemini_irq_data *g = irq_data_get_irq_chip_data(d);
+       int offset = irqd_to_hwirq(d);
+       u32 mode, polarity;
+
+       mode = readl(GEMINI_IRQ_MODE(g->base));
+       polarity = readl(GEMINI_IRQ_POLARITY(g->base));
+
+       if (trigger & (IRQ_TYPE_LEVEL_HIGH)) {
+               irq_set_handler_locked(d, handle_level_irq);
+               /* Disable edge detection */
+               mode &= ~BIT(offset);
+               polarity &= ~BIT(offset);
+       } else if (trigger & IRQ_TYPE_EDGE_RISING) {
+               irq_set_handler_locked(d, handle_edge_irq);
+               mode |= BIT(offset);
+               polarity |= BIT(offset);
+       } else if (trigger & IRQ_TYPE_EDGE_FALLING) {
+               irq_set_handler_locked(d, handle_edge_irq);
+               mode |= BIT(offset);
+               polarity &= ~BIT(offset);
+       } else {
+               irq_set_handler_locked(d, handle_bad_irq);
+               pr_warn("GEMINI IRQ: no supported trigger selected for line %d\n",
+                       offset);
+       }
+
+       writel(mode, GEMINI_IRQ_MODE(g->base));
+       writel(polarity, GEMINI_IRQ_POLARITY(g->base));
+
+       return 0;
+}
+
+static struct irq_chip gemini_irq_chip = {
+       .name           = "GEMINI",
+       .irq_ack        = gemini_irq_ack,
+       .irq_mask       = gemini_irq_mask,
+       .irq_unmask     = gemini_irq_unmask,
+       .irq_set_type   = gemini_irq_set_type,
+};
+
+/* Local static for the IRQ entry call */
+static struct gemini_irq_data girq;
+
+asmlinkage void __exception_irq_entry gemini_irqchip_handle_irq(struct pt_regs *regs)
+{
+       struct gemini_irq_data *g = &girq;
+       int irq;
+       u32 status;
+
+       while ((status = readl(GEMINI_IRQ_STATUS(g->base)))) {
+               irq = ffs(status) - 1;
+               handle_domain_irq(g->domain, irq, regs);
+       }
+}
+
+static int gemini_irqdomain_map(struct irq_domain *d, unsigned int irq,
+                               irq_hw_number_t hwirq)
+{
+       struct gemini_irq_data *g = d->host_data;
+
+       irq_set_chip_data(irq, g);
+       /* All IRQs should set up their type, flags as bad by default */
+       irq_set_chip_and_handler(irq, &gemini_irq_chip, handle_bad_irq);
+       irq_set_probe(irq);
+
+       return 0;
+}
+
+static void gemini_irqdomain_unmap(struct irq_domain *d, unsigned int irq)
+{
+       irq_set_chip_and_handler(irq, NULL, NULL);
+       irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops gemini_irqdomain_ops = {
+       .map = gemini_irqdomain_map,
+       .unmap = gemini_irqdomain_unmap,
+       .xlate = irq_domain_xlate_onetwocell,
+};
+
+int __init gemini_of_init_irq(struct device_node *node,
+                             struct device_node *parent)
+{
+       struct gemini_irq_data *g = &girq;
+
+       /*
+        * Disable the idle handler by default since it is buggy
+        * For more info see arch/arm/mach-gemini/idle.c
+        */
+       cpu_idle_poll_ctrl(true);
+
+       g->base = of_iomap(node, 0);
+       WARN(!g->base, "unable to map gemini irq registers\n");
+
+       /* Disable all interrupts */
+       writel(0, GEMINI_IRQ_MASK(g->base));
+       writel(0, GEMINI_FIQ_MASK(g->base));
+
+       g->domain = irq_domain_add_simple(node, GEMINI_NUM_IRQS, 0,
+                                         &gemini_irqdomain_ops, g);
+       set_handle_irq(gemini_irqchip_handle_irq);
+
+       return 0;
+}
+IRQCHIP_DECLARE(gemini, "cortina,gemini-interrupt-controller",
+               gemini_of_init_irq);
index 69b040f47d56a852f5a2af8b77b595442b70d2ad..23201004fd7a68e39055a69abbd41019aa66b12b 100644 (file)
@@ -161,7 +161,7 @@ struct its_cmd_desc {
                        struct its_device *dev;
                        u32 phys_id;
                        u32 event_id;
-               } its_mapvi_cmd;
+               } its_mapti_cmd;
 
                struct {
                        struct its_device *dev;
@@ -193,58 +193,56 @@ struct its_cmd_block {
 typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
                                                    struct its_cmd_desc *);
 
+static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
+{
+       u64 mask = GENMASK_ULL(h, l);
+       *raw_cmd &= ~mask;
+       *raw_cmd |= (val << l) & mask;
+}
+
 static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
 {
-       cmd->raw_cmd[0] &= ~0xffULL;
-       cmd->raw_cmd[0] |= cmd_nr;
+       its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
 }
 
 static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
 {
-       cmd->raw_cmd[0] &= BIT_ULL(32) - 1;
-       cmd->raw_cmd[0] |= ((u64)devid) << 32;
+       its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
 }
 
 static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
 {
-       cmd->raw_cmd[1] &= ~0xffffffffULL;
-       cmd->raw_cmd[1] |= id;
+       its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
 }
 
 static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
 {
-       cmd->raw_cmd[1] &= 0xffffffffULL;
-       cmd->raw_cmd[1] |= ((u64)phys_id) << 32;
+       its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
 }
 
 static void its_encode_size(struct its_cmd_block *cmd, u8 size)
 {
-       cmd->raw_cmd[1] &= ~0x1fULL;
-       cmd->raw_cmd[1] |= size & 0x1f;
+       its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
 }
 
 static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
 {
-       cmd->raw_cmd[2] &= ~0xffffffffffffULL;
-       cmd->raw_cmd[2] |= itt_addr & 0xffffffffff00ULL;
+       its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 50, 8);
 }
 
 static void its_encode_valid(struct its_cmd_block *cmd, int valid)
 {
-       cmd->raw_cmd[2] &= ~(1ULL << 63);
-       cmd->raw_cmd[2] |= ((u64)!!valid) << 63;
+       its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
 }
 
 static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
 {
-       cmd->raw_cmd[2] &= ~(0xffffffffULL << 16);
-       cmd->raw_cmd[2] |= (target_addr & (0xffffffffULL << 16));
+       its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 50, 16);
 }
 
 static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
 {
-       cmd->raw_cmd[2] &= ~0xffffULL;
-       cmd->raw_cmd[2] |= col;
+       its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
 }
 
 static inline void its_fixup_cmd(struct its_cmd_block *cmd)
@@ -289,18 +287,18 @@ static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
        return desc->its_mapc_cmd.col;
 }
 
-static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd,
+static struct its_collection *its_build_mapti_cmd(struct its_cmd_block *cmd,
                                                  struct its_cmd_desc *desc)
 {
        struct its_collection *col;
 
-       col = dev_event_to_col(desc->its_mapvi_cmd.dev,
-                              desc->its_mapvi_cmd.event_id);
+       col = dev_event_to_col(desc->its_mapti_cmd.dev,
+                              desc->its_mapti_cmd.event_id);
 
-       its_encode_cmd(cmd, GITS_CMD_MAPVI);
-       its_encode_devid(cmd, desc->its_mapvi_cmd.dev->device_id);
-       its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id);
-       its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id);
+       its_encode_cmd(cmd, GITS_CMD_MAPTI);
+       its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
+       its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
+       its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
        its_encode_collection(cmd, col->col_id);
 
        its_fixup_cmd(cmd);
@@ -413,6 +411,12 @@ static struct its_cmd_block *its_allocate_entry(struct its_node *its)
        if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
                its->cmd_write = its->cmd_base;
 
+       /* Clear command  */
+       cmd->raw_cmd[0] = 0;
+       cmd->raw_cmd[1] = 0;
+       cmd->raw_cmd[2] = 0;
+       cmd->raw_cmd[3] = 0;
+
        return cmd;
 }
 
@@ -531,15 +535,15 @@ static void its_send_mapc(struct its_node *its, struct its_collection *col,
        its_send_single_command(its, its_build_mapc_cmd, &desc);
 }
 
-static void its_send_mapvi(struct its_device *dev, u32 irq_id, u32 id)
+static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
 {
        struct its_cmd_desc desc;
 
-       desc.its_mapvi_cmd.dev = dev;
-       desc.its_mapvi_cmd.phys_id = irq_id;
-       desc.its_mapvi_cmd.event_id = id;
+       desc.its_mapti_cmd.dev = dev;
+       desc.its_mapti_cmd.phys_id = irq_id;
+       desc.its_mapti_cmd.event_id = id;
 
-       its_send_single_command(dev->its, its_build_mapvi_cmd, &desc);
+       its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
 }
 
 static void its_send_movi(struct its_device *dev,
@@ -824,7 +828,7 @@ static int __init its_alloc_lpi_tables(void)
 static const char *its_base_type_string[] = {
        [GITS_BASER_TYPE_DEVICE]        = "Devices",
        [GITS_BASER_TYPE_VCPU]          = "Virtual CPUs",
-       [GITS_BASER_TYPE_CPU]           = "Physical CPUs",
+       [GITS_BASER_TYPE_RESERVED3]     = "Reserved (3)",
        [GITS_BASER_TYPE_COLLECTION]    = "Interrupt Collections",
        [GITS_BASER_TYPE_RESERVED5]     = "Reserved (5)",
        [GITS_BASER_TYPE_RESERVED6]     = "Reserved (6)",
@@ -960,7 +964,7 @@ static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser
                                   u32 psz, u32 *order)
 {
        u64 esz = GITS_BASER_ENTRY_SIZE(its_read_baser(its, baser));
-       u64 val = GITS_BASER_InnerShareable | GITS_BASER_WaWb;
+       u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
        u32 ids = its->device_ids;
        u32 new_order = *order;
        bool indirect = false;
@@ -1025,7 +1029,7 @@ static int its_alloc_tables(struct its_node *its)
        u64 typer = gic_read_typer(its->base + GITS_TYPER);
        u32 ids = GITS_TYPER_DEVBITS(typer);
        u64 shr = GITS_BASER_InnerShareable;
-       u64 cache = GITS_BASER_WaWb;
+       u64 cache = GITS_BASER_RaWaWb;
        u32 psz = SZ_64K;
        int err, i;
 
@@ -1122,7 +1126,7 @@ static void its_cpu_init_lpis(void)
        /* set PROPBASE */
        val = (page_to_phys(gic_rdists->prop_page) |
               GICR_PROPBASER_InnerShareable |
-              GICR_PROPBASER_WaWb |
+              GICR_PROPBASER_RaWaWb |
               ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
 
        gicr_write_propbaser(val, rbase + GICR_PROPBASER);
@@ -1147,7 +1151,7 @@ static void its_cpu_init_lpis(void)
        /* set PENDBASE */
        val = (page_to_phys(pend_page) |
               GICR_PENDBASER_InnerShareable |
-              GICR_PENDBASER_WaWb);
+              GICR_PENDBASER_RaWaWb);
 
        gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
        tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
@@ -1498,7 +1502,7 @@ static void its_irq_domain_activate(struct irq_domain *domain,
        its_dev->event_map.col_map[event] = cpumask_first(cpu_mask);
 
        /* Map the GIC IRQ and event to the device */
-       its_send_mapvi(its_dev, d->hwirq, event);
+       its_send_mapti(its_dev, d->hwirq, event);
 }
 
 static void its_irq_domain_deactivate(struct irq_domain *domain,
@@ -1642,6 +1646,7 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
 
        inner_domain->parent = its_parent;
        inner_domain->bus_token = DOMAIN_BUS_NEXUS;
+       inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP;
        info->ops = &its_msi_domain_ops;
        info->data = its;
        inner_domain->host_data = info;
@@ -1693,7 +1698,8 @@ static int __init its_probe_one(struct resource *res,
        its->ite_size = ((gic_read_typer(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
        its->numa_node = numa_node;
 
-       its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
+       its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+                                               get_order(ITS_CMD_QUEUE_SZ));
        if (!its->cmd_base) {
                err = -ENOMEM;
                goto out_free_its;
@@ -1711,7 +1717,7 @@ static int __init its_probe_one(struct resource *res,
                goto out_free_tables;
 
        baser = (virt_to_phys(its->cmd_base)    |
-                GITS_CBASER_WaWb               |
+                GITS_CBASER_RaWaWb             |
                 GITS_CBASER_InnerShareable     |
                 (ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
                 GITS_CBASER_VALID);
@@ -1751,7 +1757,7 @@ static int __init its_probe_one(struct resource *res,
 out_free_tables:
        its_free_tables(its);
 out_free_cmd:
-       kfree(its->cmd_base);
+       free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
 out_free_its:
        kfree(its);
 out_unmap:
index 54a5e870a8f56d627f9622a4a9fb03e07347aa0a..efbcf8435185244cfbfac87f3f36661d88a13ceb 100644 (file)
@@ -19,9 +19,9 @@
 #include <linux/bitops.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
+#include <linux/interrupt.h>
 #include <linux/irqdomain.h>
 #include <linux/irqchip.h>
-#include <linux/irqchip/chained_irq.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/mfd/syscon.h>
@@ -39,6 +39,7 @@ struct keystone_irq_device {
        struct irq_domain       *irqd;
        struct regmap           *devctrl_regs;
        u32                     devctrl_offset;
+       raw_spinlock_t          wa_lock;
 };
 
 static inline u32 keystone_irq_readl(struct keystone_irq_device *kirq)
@@ -83,17 +84,15 @@ static void keystone_irq_ack(struct irq_data *d)
        /* nothing to do here */
 }
 
-static void keystone_irq_handler(struct irq_desc *desc)
+static irqreturn_t keystone_irq_handler(int irq, void *keystone_irq)
 {
-       unsigned int irq = irq_desc_get_irq(desc);
-       struct keystone_irq_device *kirq = irq_desc_get_handler_data(desc);
+       struct keystone_irq_device *kirq = keystone_irq;
+       unsigned long wa_lock_flags;
        unsigned long pending;
        int src, virq;
 
        dev_dbg(kirq->dev, "start irq %d\n", irq);
 
-       chained_irq_enter(irq_desc_get_chip(desc), desc);
-
        pending = keystone_irq_readl(kirq);
        keystone_irq_writel(kirq, pending);
 
@@ -111,13 +110,15 @@ static void keystone_irq_handler(struct irq_desc *desc)
                        if (!virq)
                                dev_warn(kirq->dev, "spurious irq detected hwirq %d, virq %d\n",
                                         src, virq);
+                       raw_spin_lock_irqsave(&kirq->wa_lock, wa_lock_flags);
                        generic_handle_irq(virq);
+                       raw_spin_unlock_irqrestore(&kirq->wa_lock,
+                                                  wa_lock_flags);
                }
        }
 
-       chained_irq_exit(irq_desc_get_chip(desc), desc);
-
        dev_dbg(kirq->dev, "end irq %d\n", irq);
+       return IRQ_HANDLED;
 }
 
 static int keystone_irq_map(struct irq_domain *h, unsigned int virq,
@@ -182,9 +183,16 @@ static int keystone_irq_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
+       raw_spin_lock_init(&kirq->wa_lock);
+
        platform_set_drvdata(pdev, kirq);
 
-       irq_set_chained_handler_and_data(kirq->irq, keystone_irq_handler, kirq);
+       ret = request_irq(kirq->irq, keystone_irq_handler,
+                         0, dev_name(dev), kirq);
+       if (ret) {
+               irq_domain_remove(kirq->irqd);
+               return ret;
+       }
 
        /* clear all source bits */
        keystone_irq_writel(kirq, ~0x0);
@@ -199,6 +207,8 @@ static int keystone_irq_remove(struct platform_device *pdev)
        struct keystone_irq_device *kirq = platform_get_drvdata(pdev);
        int hwirq;
 
+       free_irq(kirq->irq, kirq);
+
        for (hwirq = 0; hwirq < KEYSTONE_N_IRQ; hwirq++)
                irq_dispose_mapping(irq_find_mapping(kirq->irqd, hwirq));
 
index c01c09e9916d3f87a6bdbdae79bebeb0f9fffd39..11d12bccc4e7f10d72fa41f5464ed4d6b6f02a5f 100644 (file)
@@ -968,6 +968,34 @@ static struct irq_domain_ops gic_ipi_domain_ops = {
        .match = gic_ipi_domain_match,
 };
 
+static void __init gic_map_single_int(struct device_node *node,
+                                     unsigned int irq)
+{
+       unsigned int linux_irq;
+       struct irq_fwspec local_int_fwspec = {
+               .fwnode         = &node->fwnode,
+               .param_count    = 3,
+               .param          = {
+                       [0]     = GIC_LOCAL,
+                       [1]     = irq,
+                       [2]     = IRQ_TYPE_NONE,
+               },
+       };
+
+       if (!gic_local_irq_is_routable(irq))
+               return;
+
+       linux_irq = irq_create_fwspec_mapping(&local_int_fwspec);
+       WARN_ON(!linux_irq);
+}
+
+static void __init gic_map_interrupts(struct device_node *node)
+{
+       gic_map_single_int(node, GIC_LOCAL_INT_TIMER);
+       gic_map_single_int(node, GIC_LOCAL_INT_PERFCTR);
+       gic_map_single_int(node, GIC_LOCAL_INT_FDC);
+}
+
 static void __init __gic_init(unsigned long gic_base_addr,
                              unsigned long gic_addrspace_size,
                              unsigned int cpu_vec, unsigned int irqbase,
@@ -1067,6 +1095,7 @@ static void __init __gic_init(unsigned long gic_base_addr,
        }
 
        gic_basic_init();
+       gic_map_interrupts(node);
 }
 
 void __init gic_init(unsigned long gic_base_addr,
index 17304705f2cf9443b99690565ece5b537e3e61c4..05fa9f7af53cd78732802dfbec28c27770e6f01f 100644 (file)
@@ -131,12 +131,16 @@ static struct irq_chip mxs_icoll_chip = {
        .irq_ack = icoll_ack_irq,
        .irq_mask = icoll_mask_irq,
        .irq_unmask = icoll_unmask_irq,
+       .flags = IRQCHIP_MASK_ON_SUSPEND |
+                IRQCHIP_SKIP_SET_WAKE,
 };
 
 static struct irq_chip asm9260_icoll_chip = {
        .irq_ack = icoll_ack_irq,
        .irq_mask = asm9260_mask_irq,
        .irq_unmask = asm9260_unmask_irq,
+       .flags = IRQCHIP_MASK_ON_SUSPEND |
+                IRQCHIP_SKIP_SET_WAKE,
 };
 
 asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
diff --git a/drivers/irqchip/qcom-irq-combiner.c b/drivers/irqchip/qcom-irq-combiner.c
new file mode 100644 (file)
index 0000000..2265586
--- /dev/null
@@ -0,0 +1,296 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Driver for interrupt combiners in the Top-level Control and Status
+ * Registers (TCSR) hardware block in Qualcomm Technologies chips.
+ * An interrupt combiner in this block combines a set of interrupts by
+ * OR'ing the individual interrupt signals into a summary interrupt
+ * signal routed to a parent interrupt controller, and provides read-
+ * only, 32-bit registers to query the status of individual interrupts.
+ * The status bit for IRQ n is bit (n % 32) within register (n / 32)
+ * of the given combiner. Thus, each combiner can be described as a set
+ * of register offsets and the number of IRQs managed.
+ */
+
+#define pr_fmt(fmt) "QCOM80B1:" fmt
+
+#include <linux/acpi.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/platform_device.h>
+
+#define REG_SIZE 32
+
+struct combiner_reg {
+       void __iomem *addr;
+       unsigned long enabled;
+};
+
+struct combiner {
+       struct irq_domain   *domain;
+       int                 parent_irq;
+       u32                 nirqs;
+       u32                 nregs;
+       struct combiner_reg regs[0];
+};
+
+static inline int irq_nr(u32 reg, u32 bit)
+{
+       return reg * REG_SIZE + bit;
+}
+
+/*
+ * Handler for the cascaded IRQ.
+ */
+static void combiner_handle_irq(struct irq_desc *desc)
+{
+       struct combiner *combiner = irq_desc_get_handler_data(desc);
+       struct irq_chip *chip = irq_desc_get_chip(desc);
+       u32 reg;
+
+       chained_irq_enter(chip, desc);
+
+       for (reg = 0; reg < combiner->nregs; reg++) {
+               int virq;
+               int hwirq;
+               u32 bit;
+               u32 status;
+
+               bit = readl_relaxed(combiner->regs[reg].addr);
+               status = bit & combiner->regs[reg].enabled;
+               if (!status)
+                       pr_warn_ratelimited("Unexpected IRQ on CPU%d: (%08x %08lx %p)\n",
+                                           smp_processor_id(), bit,
+                                           combiner->regs[reg].enabled,
+                                           combiner->regs[reg].addr);
+
+               while (status) {
+                       bit = __ffs(status);
+                       status &= ~(1 << bit);
+                       hwirq = irq_nr(reg, bit);
+                       virq = irq_find_mapping(combiner->domain, hwirq);
+                       if (virq > 0)
+                               generic_handle_irq(virq);
+
+               }
+       }
+
+       chained_irq_exit(chip, desc);
+}
+
+static void combiner_irq_chip_mask_irq(struct irq_data *data)
+{
+       struct combiner *combiner = irq_data_get_irq_chip_data(data);
+       struct combiner_reg *reg = combiner->regs + data->hwirq / REG_SIZE;
+
+       clear_bit(data->hwirq % REG_SIZE, &reg->enabled);
+}
+
+static void combiner_irq_chip_unmask_irq(struct irq_data *data)
+{
+       struct combiner *combiner = irq_data_get_irq_chip_data(data);
+       struct combiner_reg *reg = combiner->regs + data->hwirq / REG_SIZE;
+
+       set_bit(data->hwirq % REG_SIZE, &reg->enabled);
+}
+
+static struct irq_chip irq_chip = {
+       .irq_mask = combiner_irq_chip_mask_irq,
+       .irq_unmask = combiner_irq_chip_unmask_irq,
+       .name = "qcom-irq-combiner"
+};
+
+static int combiner_irq_map(struct irq_domain *domain, unsigned int irq,
+                                  irq_hw_number_t hwirq)
+{
+       irq_set_chip_and_handler(irq, &irq_chip, handle_level_irq);
+       irq_set_chip_data(irq, domain->host_data);
+       irq_set_noprobe(irq);
+       return 0;
+}
+
+static void combiner_irq_unmap(struct irq_domain *domain, unsigned int irq)
+{
+       irq_domain_reset_irq_data(irq_get_irq_data(irq));
+}
+
+static int combiner_irq_translate(struct irq_domain *d, struct irq_fwspec *fws,
+                                 unsigned long *hwirq, unsigned int *type)
+{
+       struct combiner *combiner = d->host_data;
+
+       if (is_acpi_node(fws->fwnode)) {
+               if (WARN_ON((fws->param_count != 2) ||
+                           (fws->param[0] >= combiner->nirqs) ||
+                           (fws->param[1] & IORESOURCE_IRQ_LOWEDGE) ||
+                           (fws->param[1] & IORESOURCE_IRQ_HIGHEDGE)))
+                       return -EINVAL;
+
+               *hwirq = fws->param[0];
+               *type = fws->param[1];
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
+static const struct irq_domain_ops domain_ops = {
+       .map = combiner_irq_map,
+       .unmap = combiner_irq_unmap,
+       .translate = combiner_irq_translate
+};
+
+static acpi_status count_registers_cb(struct acpi_resource *ares, void *context)
+{
+       int *count = context;
+
+       if (ares->type == ACPI_RESOURCE_TYPE_GENERIC_REGISTER)
+               ++(*count);
+       return AE_OK;
+}
+
+static int count_registers(struct platform_device *pdev)
+{
+       acpi_handle ahandle = ACPI_HANDLE(&pdev->dev);
+       acpi_status status;
+       int count = 0;
+
+       if (!acpi_has_method(ahandle, METHOD_NAME__CRS))
+               return -EINVAL;
+
+       status = acpi_walk_resources(ahandle, METHOD_NAME__CRS,
+                                    count_registers_cb, &count);
+       if (ACPI_FAILURE(status))
+               return -EINVAL;
+       return count;
+}
+
+struct get_registers_context {
+       struct device *dev;
+       struct combiner *combiner;
+       int err;
+};
+
+static acpi_status get_registers_cb(struct acpi_resource *ares, void *context)
+{
+       struct get_registers_context *ctx = context;
+       struct acpi_resource_generic_register *reg;
+       phys_addr_t paddr;
+       void __iomem *vaddr;
+
+       if (ares->type != ACPI_RESOURCE_TYPE_GENERIC_REGISTER)
+               return AE_OK;
+
+       reg = &ares->data.generic_reg;
+       paddr = reg->address;
+       if ((reg->space_id != ACPI_SPACE_MEM) ||
+           (reg->bit_offset != 0) ||
+           (reg->bit_width > REG_SIZE)) {
+               dev_err(ctx->dev, "Bad register resource @%pa\n", &paddr);
+               ctx->err = -EINVAL;
+               return AE_ERROR;
+       }
+
+       vaddr = devm_ioremap(ctx->dev, reg->address, REG_SIZE);
+       if (!vaddr) {
+               dev_err(ctx->dev, "Can't map register @%pa\n", &paddr);
+               ctx->err = -ENOMEM;
+               return AE_ERROR;
+       }
+
+       ctx->combiner->regs[ctx->combiner->nregs].addr = vaddr;
+       ctx->combiner->nirqs += reg->bit_width;
+       ctx->combiner->nregs++;
+       return AE_OK;
+}
+
+static int get_registers(struct platform_device *pdev, struct combiner *comb)
+{
+       acpi_handle ahandle = ACPI_HANDLE(&pdev->dev);
+       acpi_status status;
+       struct get_registers_context ctx;
+
+       if (!acpi_has_method(ahandle, METHOD_NAME__CRS))
+               return -EINVAL;
+
+       ctx.dev = &pdev->dev;
+       ctx.combiner = comb;
+       ctx.err = 0;
+
+       status = acpi_walk_resources(ahandle, METHOD_NAME__CRS,
+                                    get_registers_cb, &ctx);
+       if (ACPI_FAILURE(status))
+               return ctx.err;
+       return 0;
+}
+
+static int __init combiner_probe(struct platform_device *pdev)
+{
+       struct combiner *combiner;
+       size_t alloc_sz;
+       u32 nregs;
+       int err;
+
+       nregs = count_registers(pdev);
+       if (nregs <= 0) {
+               dev_err(&pdev->dev, "Error reading register resources\n");
+               return -EINVAL;
+       }
+
+       alloc_sz = sizeof(*combiner) + sizeof(struct combiner_reg) * nregs;
+       combiner = devm_kzalloc(&pdev->dev, alloc_sz, GFP_KERNEL);
+       if (!combiner)
+               return -ENOMEM;
+
+       err = get_registers(pdev, combiner);
+       if (err < 0)
+               return err;
+
+       combiner->parent_irq = platform_get_irq(pdev, 0);
+       if (combiner->parent_irq <= 0) {
+               dev_err(&pdev->dev, "Error getting IRQ resource\n");
+               return -EPROBE_DEFER;
+       }
+
+       combiner->domain = irq_domain_create_linear(pdev->dev.fwnode, combiner->nirqs,
+                                                   &domain_ops, combiner);
+       if (!combiner->domain)
+               /* Errors printed by irq_domain_create_linear */
+               return -ENODEV;
+
+       irq_set_chained_handler_and_data(combiner->parent_irq,
+                                        combiner_handle_irq, combiner);
+
+       dev_info(&pdev->dev, "Initialized with [p=%d,n=%d,r=%p]\n",
+                combiner->parent_irq, combiner->nirqs, combiner->regs[0].addr);
+       return 0;
+}
+
+static const struct acpi_device_id qcom_irq_combiner_ids[] = {
+       { "QCOM80B1", },
+       { }
+};
+
+static struct platform_driver qcom_irq_combiner_probe = {
+       .driver = {
+               .name = "qcom-irq-combiner",
+               .acpi_match_table = ACPI_PTR(qcom_irq_combiner_ids),
+       },
+       .probe = combiner_probe,
+};
+
+static int __init register_qcom_irq_combiner(void)
+{
+       return platform_driver_register(&qcom_irq_combiner_probe);
+}
+device_initcall(register_qcom_irq_combiner);
index 1a1d99704fe694ad2f0c19933fdfc7df89803da9..296f1411fe84208d8c2511b8866d3f6936f8254a 100644 (file)
@@ -11297,7 +11297,8 @@ static void mixer_notify_update(PLCI *plci, byte others)
                                ((CAPI_MSG *) msg)->header.ncci = 0;
                                ((CAPI_MSG *) msg)->info.facility_req.Selector = SELECTOR_LINE_INTERCONNECT;
                                ((CAPI_MSG *) msg)->info.facility_req.structs[0] = 3;
-                               PUT_WORD(&(((CAPI_MSG *) msg)->info.facility_req.structs[1]), LI_REQ_SILENT_UPDATE);
+                               ((CAPI_MSG *) msg)->info.facility_req.structs[1] = LI_REQ_SILENT_UPDATE & 0xff;
+                               ((CAPI_MSG *) msg)->info.facility_req.structs[2] = LI_REQ_SILENT_UPDATE >> 8;
                                ((CAPI_MSG *) msg)->info.facility_req.structs[3] = 0;
                                w = api_put(notify_plci->appl, (CAPI_MSG *) msg);
                                if (w != _QUEUE_FULL)
index 9cb4b621fbc3cffe0bceb770bde7dbdac292dd95..b324474c0c12ee6e81b658ee736e89e297f0224a 100644 (file)
@@ -203,7 +203,7 @@ mISDNStackd(void *data)
 {
        struct mISDNstack *st = data;
 #ifdef MISDN_MSG_STATS
-       cputime_t utime, stime;
+       u64 utime, stime;
 #endif
        int err = 0;
 
@@ -308,7 +308,7 @@ mISDNStackd(void *data)
               st->stopped_cnt);
        task_cputime(st->thread, &utime, &stime);
        printk(KERN_DEBUG
-              "mISDNStackd daemon for %s utime(%ld) stime(%ld)\n",
+              "mISDNStackd daemon for %s utime(%llu) stime(%llu)\n",
               dev_name(&st->dev->dev), utime, stime);
        printk(KERN_DEBUG
               "mISDNStackd daemon for %s nvcsw(%ld) nivcsw(%ld)\n",
index c621cbbb5768a79e95869344fab58461d15f3e4e..275f467956eedf5469661be675fad42a95d16b0d 100644 (file)
@@ -29,6 +29,15 @@ config LEDS_CLASS_FLASH
          for the flash related features of a LED device. It can be built
          as a module.
 
+config LEDS_BRIGHTNESS_HW_CHANGED
+       bool "LED Class brightness_hw_changed attribute support"
+       depends on LEDS_CLASS
+       help
+         This option enables support for the brightness_hw_changed attribute
+         for led sysfs class devices under /sys/class/leds.
+
+         See Documentation/ABI/testing/sysfs-class-led for details.
+
 comment "LED drivers"
 
 config LEDS_88PM860X
index 326ee6e925a205531f97f0815451e4af87505ceb..f2b0a80a62b46712a17e642efebd148ea2c99537 100644 (file)
@@ -103,6 +103,68 @@ static const struct attribute_group *led_groups[] = {
        NULL,
 };
 
+#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
+static ssize_t brightness_hw_changed_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct led_classdev *led_cdev = dev_get_drvdata(dev);
+
+       if (led_cdev->brightness_hw_changed == -1)
+               return -ENODATA;
+
+       return sprintf(buf, "%u\n", led_cdev->brightness_hw_changed);
+}
+
+static DEVICE_ATTR_RO(brightness_hw_changed);
+
+static int led_add_brightness_hw_changed(struct led_classdev *led_cdev)
+{
+       struct device *dev = led_cdev->dev;
+       int ret;
+
+       ret = device_create_file(dev, &dev_attr_brightness_hw_changed);
+       if (ret) {
+               dev_err(dev, "Error creating brightness_hw_changed\n");
+               return ret;
+       }
+
+       led_cdev->brightness_hw_changed_kn =
+               sysfs_get_dirent(dev->kobj.sd, "brightness_hw_changed");
+       if (!led_cdev->brightness_hw_changed_kn) {
+               dev_err(dev, "Error getting brightness_hw_changed kn\n");
+               device_remove_file(dev, &dev_attr_brightness_hw_changed);
+               return -ENXIO;
+       }
+
+       return 0;
+}
+
+static void led_remove_brightness_hw_changed(struct led_classdev *led_cdev)
+{
+       sysfs_put(led_cdev->brightness_hw_changed_kn);
+       device_remove_file(led_cdev->dev, &dev_attr_brightness_hw_changed);
+}
+
+void led_classdev_notify_brightness_hw_changed(struct led_classdev *led_cdev,
+                                              enum led_brightness brightness)
+{
+       if (WARN_ON(!led_cdev->brightness_hw_changed_kn))
+               return;
+
+       led_cdev->brightness_hw_changed = brightness;
+       sysfs_notify_dirent(led_cdev->brightness_hw_changed_kn);
+}
+EXPORT_SYMBOL_GPL(led_classdev_notify_brightness_hw_changed);
+#else
+static int led_add_brightness_hw_changed(struct led_classdev *led_cdev)
+{
+       return 0;
+}
+static void led_remove_brightness_hw_changed(struct led_classdev *led_cdev)
+{
+}
+#endif
+
 /**
  * led_classdev_suspend - suspend an led_classdev.
  * @led_cdev: the led_classdev to suspend.
@@ -204,9 +266,20 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
                dev_warn(parent, "Led %s renamed to %s due to name collision",
                                led_cdev->name, dev_name(led_cdev->dev));
 
+       if (led_cdev->flags & LED_BRIGHT_HW_CHANGED) {
+               ret = led_add_brightness_hw_changed(led_cdev);
+               if (ret) {
+                       device_unregister(led_cdev->dev);
+                       return ret;
+               }
+       }
+
        led_cdev->work_flags = 0;
 #ifdef CONFIG_LEDS_TRIGGERS
        init_rwsem(&led_cdev->trigger_lock);
+#endif
+#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
+       led_cdev->brightness_hw_changed = -1;
 #endif
        mutex_init(&led_cdev->led_access);
        /* add to the list of leds */
@@ -256,6 +329,9 @@ void led_classdev_unregister(struct led_classdev *led_cdev)
 
        flush_work(&led_cdev->set_brightness_work);
 
+       if (led_cdev->flags & LED_BRIGHT_HW_CHANGED)
+               led_remove_brightness_hw_changed(led_cdev);
+
        device_unregister(led_cdev->dev);
 
        down_write(&leds_list_lock);
index bf23ba191ad06857a3a3e1a7ab8e2725d910262f..45296aaca9daafb74d7d161285c9b124efe0a2f2 100644 (file)
@@ -270,15 +270,15 @@ static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev,
                return -ENXIO;
 
        led->ctrl_gpio = devm_gpiod_get(dev, "ctrl", GPIOD_ASIS);
-       if (IS_ERR(led->ctrl_gpio)) {
-               ret = PTR_ERR(led->ctrl_gpio);
+       ret = PTR_ERR_OR_ZERO(led->ctrl_gpio);
+       if (ret) {
                dev_err(dev, "cannot get ctrl-gpios %d\n", ret);
                return ret;
        }
 
        led->aux_gpio = devm_gpiod_get(dev, "aux", GPIOD_ASIS);
-       if (IS_ERR(led->aux_gpio)) {
-               ret = PTR_ERR(led->aux_gpio);
+       ret = PTR_ERR_OR_ZERO(led->aux_gpio);
+       if (ret) {
                dev_err(dev, "cannot get aux-gpios %d\n", ret);
                return ret;
        }
index c9f386213e9ef16ae006e2270e2615e4bb60253e..e6f2f8b9f09ad427b83f460cf360fff3907aaa64 100644 (file)
@@ -43,6 +43,9 @@ static void led_heartbeat_function(unsigned long data)
                return;
        }
 
+       if (test_and_clear_bit(LED_BLINK_BRIGHTNESS_CHANGE, &led_cdev->work_flags))
+               led_cdev->blink_brightness = led_cdev->new_blink_brightness;
+
        /* acts like an actual heart beat -- ie thump-thump-pause... */
        switch (heartbeat_data->phase) {
        case 0:
@@ -59,26 +62,26 @@ static void led_heartbeat_function(unsigned long data)
                delay = msecs_to_jiffies(70);
                heartbeat_data->phase++;
                if (!heartbeat_data->invert)
-                       brightness = led_cdev->max_brightness;
+                       brightness = led_cdev->blink_brightness;
                break;
        case 1:
                delay = heartbeat_data->period / 4 - msecs_to_jiffies(70);
                heartbeat_data->phase++;
                if (heartbeat_data->invert)
-                       brightness = led_cdev->max_brightness;
+                       brightness = led_cdev->blink_brightness;
                break;
        case 2:
                delay = msecs_to_jiffies(70);
                heartbeat_data->phase++;
                if (!heartbeat_data->invert)
-                       brightness = led_cdev->max_brightness;
+                       brightness = led_cdev->blink_brightness;
                break;
        default:
                delay = heartbeat_data->period - heartbeat_data->period / 4 -
                        msecs_to_jiffies(70);
                heartbeat_data->phase = 0;
                if (heartbeat_data->invert)
-                       brightness = led_cdev->max_brightness;
+                       brightness = led_cdev->blink_brightness;
                break;
        }
 
@@ -133,7 +136,10 @@ static void heartbeat_trig_activate(struct led_classdev *led_cdev)
        setup_timer(&heartbeat_data->timer,
                    led_heartbeat_function, (unsigned long) led_cdev);
        heartbeat_data->phase = 0;
+       if (!led_cdev->blink_brightness)
+               led_cdev->blink_brightness = led_cdev->max_brightness;
        led_heartbeat_function(heartbeat_data->timer.data);
+       set_bit(LED_BLINK_SW, &led_cdev->work_flags);
        led_cdev->activated = true;
 }
 
@@ -145,6 +151,7 @@ static void heartbeat_trig_deactivate(struct led_classdev *led_cdev)
                del_timer_sync(&heartbeat_data->timer);
                device_remove_file(led_cdev->dev, &dev_attr_invert);
                kfree(heartbeat_data);
+               clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
                led_cdev->activated = false;
        }
 }
index 775527135b93b55aea664faf91d71f2e047dab11..e199fd6c71ced6dfbe94bb4e0f7581d6ff408550 100644 (file)
@@ -52,8 +52,8 @@ struct rackmeter_dma {
 struct rackmeter_cpu {
        struct delayed_work     sniffer;
        struct rackmeter        *rm;
-       cputime64_t             prev_wall;
-       cputime64_t             prev_idle;
+       u64                     prev_wall;
+       u64                     prev_idle;
        int                     zero;
 } ____cacheline_aligned;
 
@@ -81,7 +81,7 @@ static int rackmeter_ignore_nice;
 /* This is copied from cpufreq_ondemand, maybe we should put it in
  * a common header somewhere
  */
-static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
+static inline u64 get_cpu_idle_time(unsigned int cpu)
 {
        u64 retval;
 
@@ -217,23 +217,23 @@ static void rackmeter_do_timer(struct work_struct *work)
                container_of(work, struct rackmeter_cpu, sniffer.work);
        struct rackmeter *rm = rcpu->rm;
        unsigned int cpu = smp_processor_id();
-       cputime64_t cur_jiffies, total_idle_ticks;
-       unsigned int total_ticks, idle_ticks;
+       u64 cur_nsecs, total_idle_nsecs;
+       u64 total_nsecs, idle_nsecs;
        int i, offset, load, cumm, pause;
 
-       cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
-       total_ticks = (unsigned int) (cur_jiffies - rcpu->prev_wall);
-       rcpu->prev_wall = cur_jiffies;
+       cur_nsecs = jiffies64_to_nsecs(get_jiffies_64());
+       total_nsecs = cur_nsecs - rcpu->prev_wall;
+       rcpu->prev_wall = cur_nsecs;
 
-       total_idle_ticks = get_cpu_idle_time(cpu);
-       idle_ticks = (unsigned int) (total_idle_ticks - rcpu->prev_idle);
-       idle_ticks = min(idle_ticks, total_ticks);
-       rcpu->prev_idle = total_idle_ticks;
+       total_idle_nsecs = get_cpu_idle_time(cpu);
+       idle_nsecs = total_idle_nsecs - rcpu->prev_idle;
+       idle_nsecs = min(idle_nsecs, total_nsecs);
+       rcpu->prev_idle = total_idle_nsecs;
 
        /* We do a very dumb calculation to update the LEDs for now,
         * we'll do better once we have actual PWM implemented
         */
-       load = (9 * (total_ticks - idle_ticks)) / total_ticks;
+       load = div64_u64(9 * (total_nsecs - idle_nsecs), total_nsecs);
 
        offset = cpu << 3;
        cumm = 0;
@@ -278,7 +278,7 @@ static void rackmeter_init_cpu_sniffer(struct rackmeter *rm)
                        continue;
                rcpu = &rm->cpu[cpu];
                rcpu->prev_idle = get_cpu_idle_time(cpu);
-               rcpu->prev_wall = jiffies64_to_cputime64(get_jiffies_64());
+               rcpu->prev_wall = jiffies64_to_nsecs(get_jiffies_64());
                schedule_delayed_work_on(cpu, &rm->cpu[cpu].sniffer,
                                         msecs_to_jiffies(CPU_SAMPLING_RATE));
        }
index 84d2f0e4c7544b4d326df80034e6849f7e8ffbd2..d36d427a9efbf3840014b6f3cb13e597054e882e 100644 (file)
@@ -794,7 +794,7 @@ static void __wait_for_free_buffer(struct dm_bufio_client *c)
        DECLARE_WAITQUEUE(wait, current);
 
        add_wait_queue(&c->free_buffer_wait, &wait);
-       set_task_state(current, TASK_UNINTERRUPTIBLE);
+       set_current_state(TASK_UNINTERRUPTIBLE);
        dm_bufio_unlock(c);
 
        io_schedule();
index 7c6c57216bf29f301690d270ca5f05a55dea3d3f..1cb2ca9dfae36d8df009aa9f68260ae7a071ba05 100644 (file)
@@ -1210,14 +1210,14 @@ continue_locked:
                spin_unlock_irq(&cc->write_thread_wait.lock);
 
                if (unlikely(kthread_should_stop())) {
-                       set_task_state(current, TASK_RUNNING);
+                       set_current_state(TASK_RUNNING);
                        remove_wait_queue(&cc->write_thread_wait, &wait);
                        break;
                }
 
                schedule();
 
-               set_task_state(current, TASK_RUNNING);
+               set_current_state(TASK_RUNNING);
                spin_lock_irq(&cc->write_thread_wait.lock);
                __remove_wait_queue(&cc->write_thread_wait, &wait);
                goto continue_locked;
@@ -1534,18 +1534,18 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
                return PTR_ERR(key);
        }
 
-       rcu_read_lock();
+       down_read(&key->sem);
 
        ukp = user_key_payload(key);
        if (!ukp) {
-               rcu_read_unlock();
+               up_read(&key->sem);
                key_put(key);
                kzfree(new_key_string);
                return -EKEYREVOKED;
        }
 
        if (cc->key_size != ukp->datalen) {
-               rcu_read_unlock();
+               up_read(&key->sem);
                key_put(key);
                kzfree(new_key_string);
                return -EINVAL;
@@ -1553,7 +1553,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
 
        memcpy(cc->key, ukp->data, cc->key_size);
 
-       rcu_read_unlock();
+       up_read(&key->sem);
        key_put(key);
 
        /* clear the flag since following operations may invalidate previously valid key */
index 6400cffb986df21be7289dce8d2a4d4ff228a9a2..3570bcb7a4a4e5cade63c39b9071ed610da1e4ae 100644 (file)
@@ -427,7 +427,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
        unsigned long flags;
        struct priority_group *pg;
        struct pgpath *pgpath;
-       bool bypassed = true;
+       unsigned bypassed = 1;
 
        if (!atomic_read(&m->nr_valid_paths)) {
                clear_bit(MPATHF_QUEUE_IO, &m->flags);
@@ -466,7 +466,7 @@ check_current_pg:
         */
        do {
                list_for_each_entry(pg, &m->priority_groups, list) {
-                       if (pg->bypassed == bypassed)
+                       if (pg->bypassed == !!bypassed)
                                continue;
                        pgpath = choose_path_in_pg(m, pg, nr_bytes);
                        if (!IS_ERR_OR_NULL(pgpath)) {
index 9d7275fb541ad422b171cbe0ee43335e3e1efc27..6e702fc69a83cb27f6bc1792d4871ad18d708f71 100644 (file)
@@ -779,6 +779,10 @@ static void dm_old_request_fn(struct request_queue *q)
                int srcu_idx;
                struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 
+               if (unlikely(!map)) {
+                       dm_put_live_table(md, srcu_idx);
+                       return;
+               }
                ti = dm_table_find_target(map, pos);
                dm_put_live_table(md, srcu_idx);
        }
index 82821ee0d57fac691d26e0c3f41c2ba31bb611fc..01175dac0db6361f04cd1bcad910a59202cc276d 100644 (file)
@@ -5291,6 +5291,11 @@ int md_run(struct mddev *mddev)
        if (start_readonly && mddev->ro == 0)
                mddev->ro = 2; /* read-only, but switch on first write */
 
+       /*
+        * NOTE: some pers->run(), for example r5l_recovery_log(), wakes
+        * up mddev->thread. It is important to initialize critical
+        * resources for mddev->thread BEFORE calling pers->run().
+        */
        err = pers->run(mddev);
        if (err)
                pr_warn("md: pers->run() failed ...\n");
index a6dde7cab4584e877ff22c2f5a9bac283513cda1..758d90cc2733a248f8b9b1d22cc77e2ed6dee3e1 100644 (file)
@@ -120,7 +120,7 @@ static int __check_holder(struct block_lock *lock)
 static void __wait(struct waiter *w)
 {
        for (;;) {
-               set_task_state(current, TASK_UNINTERRUPTIBLE);
+               set_current_state(TASK_UNINTERRUPTIBLE);
 
                if (!w->task)
                        break;
@@ -128,7 +128,7 @@ static void __wait(struct waiter *w)
                schedule();
        }
 
-       set_task_state(current, TASK_RUNNING);
+       set_current_state(TASK_RUNNING);
 }
 
 static void __wake_waiter(struct waiter *w)
index 0e8ed2c327b07fd849c1720d7b272dd860b949b9..302dea3296ba5ccd07740365314f45d74df49ec2 100644 (file)
@@ -162,6 +162,8 @@ struct r5l_log {
 
        /* to submit async io_units, to fulfill ordering of flush */
        struct work_struct deferred_io_work;
+       /* to disable write back during in degraded mode */
+       struct work_struct disable_writeback_work;
 };
 
 /*
@@ -611,6 +613,21 @@ static void r5l_submit_io_async(struct work_struct *work)
                r5l_do_submit_io(log, io);
 }
 
+static void r5c_disable_writeback_async(struct work_struct *work)
+{
+       struct r5l_log *log = container_of(work, struct r5l_log,
+                                          disable_writeback_work);
+       struct mddev *mddev = log->rdev->mddev;
+
+       if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
+               return;
+       pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n",
+               mdname(mddev));
+       mddev_suspend(mddev);
+       log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
+       mddev_resume(mddev);
+}
+
 static void r5l_submit_current_io(struct r5l_log *log)
 {
        struct r5l_io_unit *io = log->current_io;
@@ -1393,8 +1410,6 @@ static void r5l_do_reclaim(struct r5l_log *log)
        next_checkpoint = r5c_calculate_new_cp(conf);
        spin_unlock_irq(&log->io_list_lock);
 
-       BUG_ON(reclaimable < 0);
-
        if (reclaimable == 0 || !write_super)
                return;
 
@@ -2062,7 +2077,7 @@ static int
 r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
                                       struct r5l_recovery_ctx *ctx)
 {
-       struct stripe_head *sh, *next;
+       struct stripe_head *sh;
        struct mddev *mddev = log->rdev->mddev;
        struct page *page;
        sector_t next_checkpoint = MaxSector;
@@ -2076,7 +2091,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
 
        WARN_ON(list_empty(&ctx->cached_list));
 
-       list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
+       list_for_each_entry(sh, &ctx->cached_list, lru) {
                struct r5l_meta_block *mb;
                int i;
                int offset;
@@ -2126,14 +2141,39 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
                ctx->pos = write_pos;
                ctx->seq += 1;
                next_checkpoint = sh->log_start;
-               list_del_init(&sh->lru);
-               raid5_release_stripe(sh);
        }
        log->next_checkpoint = next_checkpoint;
        __free_page(page);
        return 0;
 }
 
+static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
+                                                struct r5l_recovery_ctx *ctx)
+{
+       struct mddev *mddev = log->rdev->mddev;
+       struct r5conf *conf = mddev->private;
+       struct stripe_head *sh, *next;
+
+       if (ctx->data_only_stripes == 0)
+               return;
+
+       log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK;
+
+       list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
+               r5c_make_stripe_write_out(sh);
+               set_bit(STRIPE_HANDLE, &sh->state);
+               list_del_init(&sh->lru);
+               raid5_release_stripe(sh);
+       }
+
+       md_wakeup_thread(conf->mddev->thread);
+       /* reuse conf->wait_for_quiescent in recovery */
+       wait_event(conf->wait_for_quiescent,
+                  atomic_read(&conf->active_stripes) == 0);
+
+       log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
+}
+
 static int r5l_recovery_log(struct r5l_log *log)
 {
        struct mddev *mddev = log->rdev->mddev;
@@ -2160,32 +2200,31 @@ static int r5l_recovery_log(struct r5l_log *log)
        pos = ctx.pos;
        ctx.seq += 10000;
 
-       if (ctx.data_only_stripes == 0) {
-               log->next_checkpoint = ctx.pos;
-               r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++);
-               ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
-       }
 
        if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0))
                pr_debug("md/raid:%s: starting from clean shutdown\n",
                         mdname(mddev));
-       else {
+       else
                pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
                         mdname(mddev), ctx.data_only_stripes,
                         ctx.data_parity_stripes);
 
-               if (ctx.data_only_stripes > 0)
-                       if (r5c_recovery_rewrite_data_only_stripes(log, &ctx)) {
-                               pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
-                                      mdname(mddev));
-                               return -EIO;
-                       }
+       if (ctx.data_only_stripes == 0) {
+               log->next_checkpoint = ctx.pos;
+               r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++);
+               ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
+       } else if (r5c_recovery_rewrite_data_only_stripes(log, &ctx)) {
+               pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
+                      mdname(mddev));
+               return -EIO;
        }
 
        log->log_start = ctx.pos;
        log->seq = ctx.seq;
        log->last_checkpoint = pos;
        r5l_write_super(log, pos);
+
+       r5c_recovery_flush_data_only_stripes(log, &ctx);
        return 0;
 }
 
@@ -2247,6 +2286,10 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev,
            val > R5C_JOURNAL_MODE_WRITE_BACK)
                return -EINVAL;
 
+       if (raid5_calc_degraded(conf) > 0 &&
+           val == R5C_JOURNAL_MODE_WRITE_BACK)
+               return -EINVAL;
+
        mddev_suspend(mddev);
        conf->log->r5c_journal_mode = val;
        mddev_resume(mddev);
@@ -2301,6 +2344,16 @@ int r5c_try_caching_write(struct r5conf *conf,
                set_bit(STRIPE_R5C_CACHING, &sh->state);
        }
 
+       /*
+        * When run in degraded mode, array is set to write-through mode.
+        * This check helps drain pending write safely in the transition to
+        * write-through mode.
+        */
+       if (s->failed) {
+               r5c_make_stripe_write_out(sh);
+               return -EAGAIN;
+       }
+
        for (i = disks; i--; ) {
                dev = &sh->dev[i];
                /* if non-overwrite, use writing-out phase */
@@ -2351,6 +2404,8 @@ void r5c_release_extra_page(struct stripe_head *sh)
                        struct page *p = sh->dev[i].orig_page;
 
                        sh->dev[i].orig_page = sh->dev[i].page;
+                       clear_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);
+
                        if (!using_disk_info_extra_page)
                                put_page(p);
                }
@@ -2555,6 +2610,19 @@ ioerr:
        return ret;
 }
 
+void r5c_update_on_rdev_error(struct mddev *mddev)
+{
+       struct r5conf *conf = mddev->private;
+       struct r5l_log *log = conf->log;
+
+       if (!log)
+               return;
+
+       if (raid5_calc_degraded(conf) > 0 &&
+           conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
+               schedule_work(&log->disable_writeback_work);
+}
+
 int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
 {
        struct request_queue *q = bdev_get_queue(rdev->bdev);
@@ -2627,6 +2695,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
        spin_lock_init(&log->no_space_stripes_lock);
 
        INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
+       INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async);
 
        log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
        INIT_LIST_HEAD(&log->stripe_in_journal_list);
@@ -2659,6 +2728,7 @@ io_kc:
 
 void r5l_exit_log(struct r5l_log *log)
 {
+       flush_work(&log->disable_writeback_work);
        md_unregister_thread(&log->reclaim_thread);
        mempool_destroy(log->meta_pool);
        bioset_free(log->bs);
index 36c13e4be9c9e5d0cedacb59910e7b4482eb6ddd..3c7e106c12a246046abc67c479f589d10966bce3 100644 (file)
@@ -556,7 +556,7 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
  * of the two sections, and some non-in_sync devices may
  * be insync in the section most affected by failed devices.
  */
-static int calc_degraded(struct r5conf *conf)
+int raid5_calc_degraded(struct r5conf *conf)
 {
        int degraded, degraded2;
        int i;
@@ -619,7 +619,7 @@ static int has_failed(struct r5conf *conf)
        if (conf->mddev->reshape_position == MaxSector)
                return conf->mddev->degraded > conf->max_degraded;
 
-       degraded = calc_degraded(conf);
+       degraded = raid5_calc_degraded(conf);
        if (degraded > conf->max_degraded)
                return 1;
        return 0;
@@ -1015,7 +1015,17 @@ again:
 
                        if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
                                WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
-                       sh->dev[i].vec.bv_page = sh->dev[i].page;
+
+                       if (!op_is_write(op) &&
+                           test_bit(R5_InJournal, &sh->dev[i].flags))
+                               /*
+                                * issuing read for a page in journal, this
+                                * must be preparing for prexor in rmw; read
+                                * the data into orig_page
+                                */
+                               sh->dev[i].vec.bv_page = sh->dev[i].orig_page;
+                       else
+                               sh->dev[i].vec.bv_page = sh->dev[i].page;
                        bi->bi_vcnt = 1;
                        bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
                        bi->bi_io_vec[0].bv_offset = 0;
@@ -2380,6 +2390,13 @@ static void raid5_end_read_request(struct bio * bi)
                } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
                        clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
 
+               if (test_bit(R5_InJournal, &sh->dev[i].flags))
+                       /*
+                        * end read for a page in journal, this
+                        * must be preparing for prexor in rmw
+                        */
+                       set_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);
+
                if (atomic_read(&rdev->read_errors))
                        atomic_set(&rdev->read_errors, 0);
        } else {
@@ -2538,7 +2555,7 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
 
        spin_lock_irqsave(&conf->device_lock, flags);
        clear_bit(In_sync, &rdev->flags);
-       mddev->degraded = calc_degraded(conf);
+       mddev->degraded = raid5_calc_degraded(conf);
        spin_unlock_irqrestore(&conf->device_lock, flags);
        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 
@@ -2552,6 +2569,7 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
                bdevname(rdev->bdev, b),
                mdname(mddev),
                conf->raid_disks - mddev->degraded);
+       r5c_update_on_rdev_error(mddev);
 }
 
 /*
@@ -2880,6 +2898,30 @@ sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous)
        return r_sector;
 }
 
+/*
+ * There are cases where we want handle_stripe_dirtying() and
+ * schedule_reconstruction() to delay towrite to some dev of a stripe.
+ *
+ * This function checks whether we want to delay the towrite. Specifically,
+ * we delay the towrite when:
+ *
+ *   1. degraded stripe has a non-overwrite to the missing dev, AND this
+ *      stripe has data in journal (for other devices).
+ *
+ *      In this case, when reading data for the non-overwrite dev, it is
+ *      necessary to handle complex rmw of write back cache (prexor with
+ *      orig_page, and xor with page). To keep read path simple, we would
+ *      like to flush data in journal to RAID disks first, so complex rmw
+ *      is handled in the write patch (handle_stripe_dirtying).
+ *
+ */
+static inline bool delay_towrite(struct r5dev *dev,
+                                  struct stripe_head_state *s)
+{
+       return !test_bit(R5_OVERWRITE, &dev->flags) &&
+               !test_bit(R5_Insync, &dev->flags) && s->injournal;
+}
+
 static void
 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
                         int rcw, int expand)
@@ -2900,7 +2942,7 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
                for (i = disks; i--; ) {
                        struct r5dev *dev = &sh->dev[i];
 
-                       if (dev->towrite) {
+                       if (dev->towrite && !delay_towrite(dev, s)) {
                                set_bit(R5_LOCKED, &dev->flags);
                                set_bit(R5_Wantdrain, &dev->flags);
                                if (!expand)
@@ -3295,13 +3337,6 @@ static int want_replace(struct stripe_head *sh, int disk_idx)
        return rv;
 }
 
-/* fetch_block - checks the given member device to see if its data needs
- * to be read or computed to satisfy a request.
- *
- * Returns 1 when no more member devices need to be checked, otherwise returns
- * 0 to tell the loop in handle_stripe_fill to continue
- */
-
 static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
                           int disk_idx, int disks)
 {
@@ -3392,6 +3427,12 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
        return 0;
 }
 
+/* fetch_block - checks the given member device to see if its data needs
+ * to be read or computed to satisfy a request.
+ *
+ * Returns 1 when no more member devices need to be checked, otherwise returns
+ * 0 to tell the loop in handle_stripe_fill to continue
+ */
 static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
                       int disk_idx, int disks)
 {
@@ -3478,10 +3519,26 @@ static void handle_stripe_fill(struct stripe_head *sh,
         * midst of changing due to a write
         */
        if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
-           !sh->reconstruct_state)
+           !sh->reconstruct_state) {
+
+               /*
+                * For degraded stripe with data in journal, do not handle
+                * read requests yet, instead, flush the stripe to raid
+                * disks first, this avoids handling complex rmw of write
+                * back cache (prexor with orig_page, and then xor with
+                * page) in the read path
+                */
+               if (s->injournal && s->failed) {
+                       if (test_bit(STRIPE_R5C_CACHING, &sh->state))
+                               r5c_make_stripe_write_out(sh);
+                       goto out;
+               }
+
                for (i = disks; i--; )
                        if (fetch_block(sh, s, i, disks))
                                break;
+       }
+out:
        set_bit(STRIPE_HANDLE, &sh->state);
 }
 
@@ -3594,6 +3651,21 @@ unhash:
                break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS);
 }
 
+/*
+ * For RMW in write back cache, we need extra page in prexor to store the
+ * old data. This page is stored in dev->orig_page.
+ *
+ * This function checks whether we have data for prexor. The exact logic
+ * is:
+ *       R5_UPTODATE && (!R5_InJournal || R5_OrigPageUPTDODATE)
+ */
+static inline bool uptodate_for_rmw(struct r5dev *dev)
+{
+       return (test_bit(R5_UPTODATE, &dev->flags)) &&
+               (!test_bit(R5_InJournal, &dev->flags) ||
+                test_bit(R5_OrigPageUPTDODATE, &dev->flags));
+}
+
 static int handle_stripe_dirtying(struct r5conf *conf,
                                  struct stripe_head *sh,
                                  struct stripe_head_state *s,
@@ -3622,12 +3694,11 @@ static int handle_stripe_dirtying(struct r5conf *conf,
        } else for (i = disks; i--; ) {
                /* would I have to read this buffer for read_modify_write */
                struct r5dev *dev = &sh->dev[i];
-               if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx ||
+               if (((dev->towrite && !delay_towrite(dev, s)) ||
+                    i == sh->pd_idx || i == sh->qd_idx ||
                     test_bit(R5_InJournal, &dev->flags)) &&
                    !test_bit(R5_LOCKED, &dev->flags) &&
-                   !((test_bit(R5_UPTODATE, &dev->flags) &&
-                      (!test_bit(R5_InJournal, &dev->flags) ||
-                       dev->page != dev->orig_page)) ||
+                   !(uptodate_for_rmw(dev) ||
                      test_bit(R5_Wantcompute, &dev->flags))) {
                        if (test_bit(R5_Insync, &dev->flags))
                                rmw++;
@@ -3639,7 +3710,6 @@ static int handle_stripe_dirtying(struct r5conf *conf,
                    i != sh->pd_idx && i != sh->qd_idx &&
                    !test_bit(R5_LOCKED, &dev->flags) &&
                    !(test_bit(R5_UPTODATE, &dev->flags) ||
-                     test_bit(R5_InJournal, &dev->flags) ||
                      test_bit(R5_Wantcompute, &dev->flags))) {
                        if (test_bit(R5_Insync, &dev->flags))
                                rcw++;
@@ -3689,13 +3759,11 @@ static int handle_stripe_dirtying(struct r5conf *conf,
 
                for (i = disks; i--; ) {
                        struct r5dev *dev = &sh->dev[i];
-                       if ((dev->towrite ||
+                       if (((dev->towrite && !delay_towrite(dev, s)) ||
                             i == sh->pd_idx || i == sh->qd_idx ||
                             test_bit(R5_InJournal, &dev->flags)) &&
                            !test_bit(R5_LOCKED, &dev->flags) &&
-                           !((test_bit(R5_UPTODATE, &dev->flags) &&
-                              (!test_bit(R5_InJournal, &dev->flags) ||
-                               dev->page != dev->orig_page)) ||
+                           !(uptodate_for_rmw(dev) ||
                              test_bit(R5_Wantcompute, &dev->flags)) &&
                            test_bit(R5_Insync, &dev->flags)) {
                                if (test_bit(STRIPE_PREREAD_ACTIVE,
@@ -3722,7 +3790,6 @@ static int handle_stripe_dirtying(struct r5conf *conf,
                            i != sh->pd_idx && i != sh->qd_idx &&
                            !test_bit(R5_LOCKED, &dev->flags) &&
                            !(test_bit(R5_UPTODATE, &dev->flags) ||
-                             test_bit(R5_InJournal, &dev->flags) ||
                              test_bit(R5_Wantcompute, &dev->flags))) {
                                rcw++;
                                if (test_bit(R5_Insync, &dev->flags) &&
@@ -7025,7 +7092,7 @@ static int raid5_run(struct mddev *mddev)
        /*
         * 0 for a fully functional array, 1 or 2 for a degraded array.
         */
-       mddev->degraded = calc_degraded(conf);
+       mddev->degraded = raid5_calc_degraded(conf);
 
        if (has_failed(conf)) {
                pr_crit("md/raid:%s: not enough operational devices (%d/%d failed)\n",
@@ -7272,7 +7339,7 @@ static int raid5_spare_active(struct mddev *mddev)
                }
        }
        spin_lock_irqsave(&conf->device_lock, flags);
-       mddev->degraded = calc_degraded(conf);
+       mddev->degraded = raid5_calc_degraded(conf);
        spin_unlock_irqrestore(&conf->device_lock, flags);
        print_raid5_conf(conf);
        return count;
@@ -7632,7 +7699,7 @@ static int raid5_start_reshape(struct mddev *mddev)
                 * pre and post number of devices.
                 */
                spin_lock_irqsave(&conf->device_lock, flags);
-               mddev->degraded = calc_degraded(conf);
+               mddev->degraded = raid5_calc_degraded(conf);
                spin_unlock_irqrestore(&conf->device_lock, flags);
        }
        mddev->raid_disks = conf->raid_disks;
@@ -7720,7 +7787,7 @@ static void raid5_finish_reshape(struct mddev *mddev)
                } else {
                        int d;
                        spin_lock_irq(&conf->device_lock);
-                       mddev->degraded = calc_degraded(conf);
+                       mddev->degraded = raid5_calc_degraded(conf);
                        spin_unlock_irq(&conf->device_lock);
                        for (d = conf->raid_disks ;
                             d < conf->raid_disks - mddev->delta_disks;
index ed8e1362ab3698e6608aceee90614501bb1a69b2..1440fa26e29629c4f9acc098f0fa9035f5ff1d1a 100644 (file)
@@ -322,6 +322,11 @@ enum r5dev_flags {
                         * data and parity being written are in the journal
                         * device
                         */
+       R5_OrigPageUPTDODATE,   /* with write back cache, we read old data into
+                                * dev->orig_page for prexor. When this flag is
+                                * set, orig_page contains latest data in the
+                                * raid disk.
+                                */
 };
 
 /*
@@ -753,6 +758,7 @@ extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
 extern struct stripe_head *
 raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
                        int previous, int noblock, int noquiesce);
+extern int raid5_calc_degraded(struct r5conf *conf);
 extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev);
 extern void r5l_exit_log(struct r5l_log *log);
 extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh);
@@ -781,4 +787,5 @@ extern void r5c_flush_cache(struct r5conf *conf, int num);
 extern void r5c_check_stripe_cache_usage(struct r5conf *conf);
 extern void r5c_check_cached_full_stripe(struct r5conf *conf);
 extern struct md_sysfs_entry r5c_journal_mode;
+extern void r5c_update_on_rdev_error(struct mddev *mddev);
 #endif
index 0ea4efb3de6683ee2dca71b3b7e20bc7f5f1e761..ccda41c2c9e41ed37969280420b7d98609c6641b 100644 (file)
@@ -30,8 +30,9 @@
 
 #include "cec-priv.h"
 
-static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx);
-static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx);
+static void cec_fill_msg_report_features(struct cec_adapter *adap,
+                                        struct cec_msg *msg,
+                                        unsigned int la_idx);
 
 /*
  * 400 ms is the time it takes for one 16 byte message to be
@@ -288,10 +289,10 @@ static void cec_data_cancel(struct cec_data *data)
 
        /* Mark it as an error */
        data->msg.tx_ts = ktime_get_ns();
-       data->msg.tx_status = CEC_TX_STATUS_ERROR |
-                             CEC_TX_STATUS_MAX_RETRIES;
+       data->msg.tx_status |= CEC_TX_STATUS_ERROR |
+                              CEC_TX_STATUS_MAX_RETRIES;
+       data->msg.tx_error_cnt++;
        data->attempts = 0;
-       data->msg.tx_error_cnt = 1;
        /* Queue transmitted message for monitoring purposes */
        cec_queue_msg_monitor(data->adap, &data->msg, 1);
 
@@ -611,8 +612,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
        }
        memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
        if (msg->len == 1) {
-               if (cec_msg_initiator(msg) != 0xf ||
-                   cec_msg_destination(msg) == 0xf) {
+               if (cec_msg_destination(msg) == 0xf) {
                        dprintk(1, "cec_transmit_msg: invalid poll message\n");
                        return -EINVAL;
                }
@@ -637,7 +637,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
                dprintk(1, "cec_transmit_msg: destination is the adapter itself\n");
                return -EINVAL;
        }
-       if (cec_msg_initiator(msg) != 0xf &&
+       if (msg->len > 1 && adap->is_configured &&
            !cec_has_log_addr(adap, cec_msg_initiator(msg))) {
                dprintk(1, "cec_transmit_msg: initiator has unknown logical address %d\n",
                        cec_msg_initiator(msg));
@@ -851,7 +851,7 @@ static const u8 cec_msg_size[256] = {
        [CEC_MSG_REQUEST_ARC_TERMINATION] = 2 | DIRECTED,
        [CEC_MSG_TERMINATE_ARC] = 2 | DIRECTED,
        [CEC_MSG_REQUEST_CURRENT_LATENCY] = 4 | BCAST,
-       [CEC_MSG_REPORT_CURRENT_LATENCY] = 7 | BCAST,
+       [CEC_MSG_REPORT_CURRENT_LATENCY] = 6 | BCAST,
        [CEC_MSG_CDC_MESSAGE] = 2 | BCAST,
 };
 
@@ -1071,7 +1071,7 @@ static int cec_config_log_addr(struct cec_adapter *adap,
 
        /* Send poll message */
        msg.len = 1;
-       msg.msg[0] = 0xf0 | log_addr;
+       msg.msg[0] = (log_addr << 4) | log_addr;
        err = cec_transmit_msg_fh(adap, &msg, NULL, true);
 
        /*
@@ -1205,7 +1205,7 @@ static int cec_config_thread_func(void *arg)
                las->log_addr[i] = CEC_LOG_ADDR_INVALID;
                if (last_la == CEC_LOG_ADDR_INVALID ||
                    last_la == CEC_LOG_ADDR_UNREGISTERED ||
-                   !(last_la & type2mask[type]))
+                   !((1 << last_la) & type2mask[type]))
                        last_la = la_list[0];
 
                err = cec_config_log_addr(adap, i, last_la);
@@ -1250,30 +1250,49 @@ configured:
                for (i = 1; i < las->num_log_addrs; i++)
                        las->log_addr[i] = CEC_LOG_ADDR_INVALID;
        }
+       for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
+               las->log_addr[i] = CEC_LOG_ADDR_INVALID;
        adap->is_configured = true;
        adap->is_configuring = false;
        cec_post_state_event(adap);
-       mutex_unlock(&adap->lock);
 
+       /*
+        * Now post the Report Features and Report Physical Address broadcast
+        * messages. Note that these are non-blocking transmits, meaning that
+        * they are just queued up and once adap->lock is unlocked the main
+        * thread will kick in and start transmitting these.
+        *
+        * If after this function is done (but before one or more of these
+        * messages are actually transmitted) the CEC adapter is unconfigured,
+        * then any remaining messages will be dropped by the main thread.
+        */
        for (i = 0; i < las->num_log_addrs; i++) {
+               struct cec_msg msg = {};
+
                if (las->log_addr[i] == CEC_LOG_ADDR_INVALID ||
                    (las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY))
                        continue;
 
-               /*
-                * Report Features must come first according
-                * to CEC 2.0
-                */
-               if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED)
-                       cec_report_features(adap, i);
-               cec_report_phys_addr(adap, i);
+               msg.msg[0] = (las->log_addr[i] << 4) | 0x0f;
+
+               /* Report Features must come first according to CEC 2.0 */
+               if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED &&
+                   adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0) {
+                       cec_fill_msg_report_features(adap, &msg, i);
+                       cec_transmit_msg_fh(adap, &msg, NULL, false);
+               }
+
+               /* Report Physical Address */
+               cec_msg_report_physical_addr(&msg, adap->phys_addr,
+                                            las->primary_device_type[i]);
+               dprintk(2, "config: la %d pa %x.%x.%x.%x\n",
+                       las->log_addr[i],
+                       cec_phys_addr_exp(adap->phys_addr));
+               cec_transmit_msg_fh(adap, &msg, NULL, false);
        }
-       for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
-               las->log_addr[i] = CEC_LOG_ADDR_INVALID;
-       mutex_lock(&adap->lock);
        adap->kthread_config = NULL;
-       mutex_unlock(&adap->lock);
        complete(&adap->config_completion);
+       mutex_unlock(&adap->lock);
        return 0;
 
 unconfigure:
@@ -1526,52 +1545,32 @@ EXPORT_SYMBOL_GPL(cec_s_log_addrs);
 
 /* High-level core CEC message handling */
 
-/* Transmit the Report Features message */
-static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx)
+/* Fill in the Report Features message */
+static void cec_fill_msg_report_features(struct cec_adapter *adap,
+                                        struct cec_msg *msg,
+                                        unsigned int la_idx)
 {
-       struct cec_msg msg = { };
        const struct cec_log_addrs *las = &adap->log_addrs;
        const u8 *features = las->features[la_idx];
        bool op_is_dev_features = false;
        unsigned int idx;
 
-       /* This is 2.0 and up only */
-       if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
-               return 0;
-
        /* Report Features */
-       msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
-       msg.len = 4;
-       msg.msg[1] = CEC_MSG_REPORT_FEATURES;
-       msg.msg[2] = adap->log_addrs.cec_version;
-       msg.msg[3] = las->all_device_types[la_idx];
+       msg->msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
+       msg->len = 4;
+       msg->msg[1] = CEC_MSG_REPORT_FEATURES;
+       msg->msg[2] = adap->log_addrs.cec_version;
+       msg->msg[3] = las->all_device_types[la_idx];
 
        /* Write RC Profiles first, then Device Features */
        for (idx = 0; idx < ARRAY_SIZE(las->features[0]); idx++) {
-               msg.msg[msg.len++] = features[idx];
+               msg->msg[msg->len++] = features[idx];
                if ((features[idx] & CEC_OP_FEAT_EXT) == 0) {
                        if (op_is_dev_features)
                                break;
                        op_is_dev_features = true;
                }
        }
-       return cec_transmit_msg(adap, &msg, false);
-}
-
-/* Transmit the Report Physical Address message */
-static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx)
-{
-       const struct cec_log_addrs *las = &adap->log_addrs;
-       struct cec_msg msg = { };
-
-       /* Report Physical Address */
-       msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
-       cec_msg_report_physical_addr(&msg, adap->phys_addr,
-                                    las->primary_device_type[la_idx]);
-       dprintk(2, "config: la %d pa %x.%x.%x.%x\n",
-               las->log_addr[la_idx],
-                       cec_phys_addr_exp(adap->phys_addr));
-       return cec_transmit_msg(adap, &msg, false);
 }
 
 /* Transmit the Feature Abort message */
@@ -1777,9 +1776,10 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
        }
 
        case CEC_MSG_GIVE_FEATURES:
-               if (adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0)
-                       return cec_report_features(adap, la_idx);
-               return 0;
+               if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
+                       return cec_feature_abort(adap, msg);
+               cec_fill_msg_report_features(adap, &tx_cec_msg, la_idx);
+               return cec_transmit_msg(adap, &tx_cec_msg, false);
 
        default:
                /*
index bc5e8cfe7ca235134cfcaa5723f27fa146d5d0c5..8f11d7e459931bb5a8ed74569781dc83d4bd6422 100644 (file)
@@ -719,6 +719,9 @@ static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h,
                skb_copy_from_linear_data(h->priv->ule_skb, dest_addr,
                                          ETH_ALEN);
                skb_pull(h->priv->ule_skb, ETH_ALEN);
+       } else {
+               /* dest_addr buffer is only valid if h->priv->ule_dbit == 0 */
+               eth_zero_addr(dest_addr);
        }
 
        /* Handle ULE Extension Headers. */
@@ -750,16 +753,8 @@ static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h,
        if (!h->priv->ule_bridged) {
                skb_push(h->priv->ule_skb, ETH_HLEN);
                h->ethh = (struct ethhdr *)h->priv->ule_skb->data;
-               if (!h->priv->ule_dbit) {
-                       /*
-                        * dest_addr buffer is only valid if
-                        * h->priv->ule_dbit == 0
-                        */
-                       memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN);
-                       eth_zero_addr(h->ethh->h_source);
-               } else /* zeroize source and dest */
-                       memset(h->ethh, 0, ETH_ALEN * 2);
-
+               memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN);
+               eth_zero_addr(h->ethh->h_source);
                h->ethh->h_proto = htons(h->priv->ule_sndu_type);
        }
        /* else:  skb is in correct state; nothing to do. */
index b31fa6fae009171a8edda132b224a52631a24b39..b979ea148251deab48fbfca7a0141aa408010f5e 100644 (file)
@@ -655,6 +655,7 @@ config VIDEO_S5K6A3
 config VIDEO_S5K4ECGX
         tristate "Samsung S5K4ECGX sensor support"
         depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+       select CRC32
         ---help---
           This is a V4L2 sensor-level driver for Samsung S5K4ECGX 5M
           camera sensor with an embedded SoC image signal processor.
index 59872b31f832cb7983337e1ce1290a5eea1aa36a..f4e92bdfe1926cb71be5a9dc29de7b7608903950 100644 (file)
@@ -2741,9 +2741,7 @@ static const struct v4l2_subdev_internal_ops smiapp_internal_ops = {
  * I2C Driver
  */
 
-#ifdef CONFIG_PM
-
-static int smiapp_suspend(struct device *dev)
+static int __maybe_unused smiapp_suspend(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct v4l2_subdev *subdev = i2c_get_clientdata(client);
@@ -2768,7 +2766,7 @@ static int smiapp_suspend(struct device *dev)
        return 0;
 }
 
-static int smiapp_resume(struct device *dev)
+static int __maybe_unused smiapp_resume(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct v4l2_subdev *subdev = i2c_get_clientdata(client);
@@ -2783,13 +2781,6 @@ static int smiapp_resume(struct device *dev)
        return rval;
 }
 
-#else
-
-#define smiapp_suspend NULL
-#define smiapp_resume  NULL
-
-#endif /* CONFIG_PM */
-
 static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev)
 {
        struct smiapp_hwconfig *hwcfg;
@@ -2913,13 +2904,9 @@ static int smiapp_probe(struct i2c_client *client,
        if (IS_ERR(sensor->xshutdown))
                return PTR_ERR(sensor->xshutdown);
 
-       pm_runtime_enable(&client->dev);
-
-       rval = pm_runtime_get_sync(&client->dev);
-       if (rval < 0) {
-               rval = -ENODEV;
-               goto out_power_off;
-       }
+       rval = smiapp_power_on(&client->dev);
+       if (rval < 0)
+               return rval;
 
        rval = smiapp_identify_module(sensor);
        if (rval) {
@@ -3100,6 +3087,9 @@ static int smiapp_probe(struct i2c_client *client,
        if (rval < 0)
                goto out_media_entity_cleanup;
 
+       pm_runtime_set_active(&client->dev);
+       pm_runtime_get_noresume(&client->dev);
+       pm_runtime_enable(&client->dev);
        pm_runtime_set_autosuspend_delay(&client->dev, 1000);
        pm_runtime_use_autosuspend(&client->dev);
        pm_runtime_put_autosuspend(&client->dev);
@@ -3113,8 +3103,7 @@ out_cleanup:
        smiapp_cleanup(sensor);
 
 out_power_off:
-       pm_runtime_put(&client->dev);
-       pm_runtime_disable(&client->dev);
+       smiapp_power_off(&client->dev);
 
        return rval;
 }
@@ -3127,8 +3116,10 @@ static int smiapp_remove(struct i2c_client *client)
 
        v4l2_async_unregister_subdev(subdev);
 
-       pm_runtime_suspend(&client->dev);
        pm_runtime_disable(&client->dev);
+       if (!pm_runtime_status_suspended(&client->dev))
+               smiapp_power_off(&client->dev);
+       pm_runtime_set_suspended(&client->dev);
 
        for (i = 0; i < sensor->ssds_used; i++) {
                v4l2_device_unregister_subdev(&sensor->ssds[i].sd);
index 3a0fe8cc64e94c5199730e00a6ce0e6fcb41ffb2..48646a7f3fb00c2e35944e656089a9031b2eaf01 100644 (file)
@@ -291,8 +291,12 @@ static void tvp5150_selmux(struct v4l2_subdev *sd)
        tvp5150_write(sd, TVP5150_OP_MODE_CTL, opmode);
        tvp5150_write(sd, TVP5150_VD_IN_SRC_SEL_1, input);
 
-       /* Svideo should enable YCrCb output and disable GPCL output
-        * For Composite and TV, it should be the reverse
+       /*
+        * Setup the FID/GLCO/VLK/HVLK and INTREQ/GPCL/VBLK output signals. For
+        * S-Video we output the vertical lock (VLK) signal on FID/GLCO/VLK/HVLK
+        * and set INTREQ/GPCL/VBLK to logic 0. For composite we output the
+        * field indicator (FID) signal on FID/GLCO/VLK/HVLK and set
+        * INTREQ/GPCL/VBLK to logic 1.
         */
        val = tvp5150_read(sd, TVP5150_MISC_CTL);
        if (val < 0) {
@@ -301,9 +305,9 @@ static void tvp5150_selmux(struct v4l2_subdev *sd)
        }
 
        if (decoder->input == TVP5150_SVIDEO)
-               val = (val & ~0x40) | 0x10;
+               val = (val & ~TVP5150_MISC_CTL_GPCL) | TVP5150_MISC_CTL_HVLK;
        else
-               val = (val & ~0x10) | 0x40;
+               val = (val & ~TVP5150_MISC_CTL_HVLK) | TVP5150_MISC_CTL_GPCL;
        tvp5150_write(sd, TVP5150_MISC_CTL, val);
 };
 
@@ -455,7 +459,12 @@ static const struct i2c_reg_value tvp5150_init_enable[] = {
        },{     /* Automatic offset and AGC enabled */
                TVP5150_ANAL_CHL_CTL, 0x15
        },{     /* Activate YCrCb output 0x9 or 0xd ? */
-               TVP5150_MISC_CTL, 0x6f
+               TVP5150_MISC_CTL, TVP5150_MISC_CTL_GPCL |
+                                 TVP5150_MISC_CTL_INTREQ_OE |
+                                 TVP5150_MISC_CTL_YCBCR_OE |
+                                 TVP5150_MISC_CTL_SYNC_OE |
+                                 TVP5150_MISC_CTL_VBLANK |
+                                 TVP5150_MISC_CTL_CLOCK_OE,
        },{     /* Activates video std autodetection for all standards */
                TVP5150_AUTOSW_MSK, 0x0
        },{     /* Default format: 0x47. For 4:2:2: 0x40 */
@@ -861,8 +870,6 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
 
        f = &format->format;
 
-       tvp5150_reset(sd, 0);
-
        f->width = decoder->rect.width;
        f->height = decoder->rect.height / 2;
 
@@ -1051,21 +1058,27 @@ static const struct media_entity_operations tvp5150_sd_media_ops = {
 static int tvp5150_s_stream(struct v4l2_subdev *sd, int enable)
 {
        struct tvp5150 *decoder = to_tvp5150(sd);
-       /* Output format: 8-bit ITU-R BT.656 with embedded syncs */
-       int val = 0x09;
-
-       /* Output format: 8-bit 4:2:2 YUV with discrete sync */
-       if (decoder->mbus_type == V4L2_MBUS_PARALLEL)
-               val = 0x0d;
+       int val;
 
-       /* Initializes TVP5150 to its default values */
-       /* # set PCLK (27MHz) */
-       tvp5150_write(sd, TVP5150_CONF_SHARED_PIN, 0x00);
+       /* Enable or disable the video output signals. */
+       val = tvp5150_read(sd, TVP5150_MISC_CTL);
+       if (val < 0)
+               return val;
+
+       val &= ~(TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_SYNC_OE |
+                TVP5150_MISC_CTL_CLOCK_OE);
+
+       if (enable) {
+               /*
+                * Enable the YCbCr and clock outputs. In discrete sync mode
+                * (non-BT.656) additionally enable the the sync outputs.
+                */
+               val |= TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_CLOCK_OE;
+               if (decoder->mbus_type == V4L2_MBUS_PARALLEL)
+                       val |= TVP5150_MISC_CTL_SYNC_OE;
+       }
 
-       if (enable)
-               tvp5150_write(sd, TVP5150_MISC_CTL, val);
-       else
-               tvp5150_write(sd, TVP5150_MISC_CTL, 0x00);
+       tvp5150_write(sd, TVP5150_MISC_CTL, val);
 
        return 0;
 }
@@ -1524,7 +1537,6 @@ static int tvp5150_probe(struct i2c_client *c,
                res = core->hdl.error;
                goto err;
        }
-       v4l2_ctrl_handler_setup(&core->hdl);
 
        /* Default is no cropping */
        core->rect.top = 0;
@@ -1535,6 +1547,8 @@ static int tvp5150_probe(struct i2c_client *c,
        core->rect.left = 0;
        core->rect.width = TVP5150_H_MAX;
 
+       tvp5150_reset(sd, 0);   /* Calls v4l2_ctrl_handler_setup() */
+
        res = v4l2_async_register_subdev(sd);
        if (res < 0)
                goto err;
index 25a994944918703f064eee85a6017ca7010dca4b..30a48c28d05ab5d46d9eff505005ebf3f90e6409 100644 (file)
@@ -9,6 +9,15 @@
 #define TVP5150_ANAL_CHL_CTL         0x01 /* Analog channel controls */
 #define TVP5150_OP_MODE_CTL          0x02 /* Operation mode controls */
 #define TVP5150_MISC_CTL             0x03 /* Miscellaneous controls */
+#define TVP5150_MISC_CTL_VBLK_GPCL     BIT(7)
+#define TVP5150_MISC_CTL_GPCL          BIT(6)
+#define TVP5150_MISC_CTL_INTREQ_OE     BIT(5)
+#define TVP5150_MISC_CTL_HVLK          BIT(4)
+#define TVP5150_MISC_CTL_YCBCR_OE      BIT(3)
+#define TVP5150_MISC_CTL_SYNC_OE       BIT(2)
+#define TVP5150_MISC_CTL_VBLANK                BIT(1)
+#define TVP5150_MISC_CTL_CLOCK_OE      BIT(0)
+
 #define TVP5150_AUTOSW_MSK           0x04 /* Autoswitch mask: TVP5150A / TVP5150AM */
 
 /* Reserved 05h */
index 979634000597f79124befabb48af909d4b8f6a6e..d5c911c09e2b792e767970f32c463862617cec19 100644 (file)
@@ -308,9 +308,7 @@ static void cobalt_pci_iounmap(struct cobalt *cobalt, struct pci_dev *pci_dev)
 static void cobalt_free_msi(struct cobalt *cobalt, struct pci_dev *pci_dev)
 {
        free_irq(pci_dev->irq, (void *)cobalt);
-
-       if (cobalt->msi_enabled)
-               pci_disable_msi(pci_dev);
+       pci_free_irq_vectors(pci_dev);
 }
 
 static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev,
@@ -387,14 +385,12 @@ static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev,
           from being generated. */
        cobalt_set_interrupt(cobalt, false);
 
-       if (pci_enable_msi_range(pci_dev, 1, 1) < 1) {
+       if (pci_alloc_irq_vectors(pci_dev, 1, 1, PCI_IRQ_MSI) < 1) {
                cobalt_err("Could not enable MSI\n");
-               cobalt->msi_enabled = false;
                ret = -EIO;
                goto err_release;
        }
        msi_config_show(cobalt, pci_dev);
-       cobalt->msi_enabled = true;
 
        /* Register IRQ */
        if (request_irq(pci_dev->irq, cobalt_irq_handler, IRQF_SHARED,
index ed00dc9d93995e03ddf128dfb476dbc73ae086a3..00f773ec359ad954a15859329fb541fd5ab2a848 100644 (file)
@@ -287,8 +287,6 @@ struct cobalt {
        u32 irq_none;
        u32 irq_full_fifo;
 
-       bool msi_enabled;
-
        /* omnitek dma */
        int dma_channels;
        int first_fifo_channel;
index 07fa08be9e994a3f7d5952251b73f871fe4ecdca..d54ebe7e02150f7240f58da51a32d025026bdf34 100644 (file)
@@ -97,14 +97,13 @@ struct pctv452e_state {
        u8 c;      /* transaction counter, wraps around...  */
        u8 initialized; /* set to 1 if 0x15 has been sent */
        u16 last_rc_key;
-
-       unsigned char data[80];
 };
 
 static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
                         unsigned int write_len, unsigned int read_len)
 {
        struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+       u8 *buf;
        u8 id;
        unsigned int rlen;
        int ret;
@@ -114,36 +113,39 @@ static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
                return -EIO;
        }
 
-       mutex_lock(&state->ca_mutex);
+       buf = kmalloc(64, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
        id = state->c++;
 
-       state->data[0] = SYNC_BYTE_OUT;
-       state->data[1] = id;
-       state->data[2] = cmd;
-       state->data[3] = write_len;
+       buf[0] = SYNC_BYTE_OUT;
+       buf[1] = id;
+       buf[2] = cmd;
+       buf[3] = write_len;
 
-       memcpy(state->data + 4, data, write_len);
+       memcpy(buf + 4, data, write_len);
 
        rlen = (read_len > 0) ? 64 : 0;
-       ret = dvb_usb_generic_rw(d, state->data, 4 + write_len,
-                                 state->data, rlen, /* delay_ms */ 0);
+       ret = dvb_usb_generic_rw(d, buf, 4 + write_len,
+                                 buf, rlen, /* delay_ms */ 0);
        if (0 != ret)
                goto failed;
 
        ret = -EIO;
-       if (SYNC_BYTE_IN != state->data[0] || id != state->data[1])
+       if (SYNC_BYTE_IN != buf[0] || id != buf[1])
                goto failed;
 
-       memcpy(data, state->data + 4, read_len);
+       memcpy(data, buf + 4, read_len);
 
-       mutex_unlock(&state->ca_mutex);
+       kfree(buf);
        return 0;
 
 failed:
        err("CI error %d; %02X %02X %02X -> %*ph.",
-            ret, SYNC_BYTE_OUT, id, cmd, 3, state->data);
+            ret, SYNC_BYTE_OUT, id, cmd, 3, buf);
 
-       mutex_unlock(&state->ca_mutex);
+       kfree(buf);
        return ret;
 }
 
@@ -410,53 +412,57 @@ static int pctv452e_i2c_msg(struct dvb_usb_device *d, u8 addr,
                                u8 *rcv_buf, u8 rcv_len)
 {
        struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+       u8 *buf;
        u8 id;
        int ret;
 
-       mutex_lock(&state->ca_mutex);
+       buf = kmalloc(64, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
        id = state->c++;
 
        ret = -EINVAL;
        if (snd_len > 64 - 7 || rcv_len > 64 - 7)
                goto failed;
 
-       state->data[0] = SYNC_BYTE_OUT;
-       state->data[1] = id;
-       state->data[2] = PCTV_CMD_I2C;
-       state->data[3] = snd_len + 3;
-       state->data[4] = addr << 1;
-       state->data[5] = snd_len;
-       state->data[6] = rcv_len;
+       buf[0] = SYNC_BYTE_OUT;
+       buf[1] = id;
+       buf[2] = PCTV_CMD_I2C;
+       buf[3] = snd_len + 3;
+       buf[4] = addr << 1;
+       buf[5] = snd_len;
+       buf[6] = rcv_len;
 
-       memcpy(state->data + 7, snd_buf, snd_len);
+       memcpy(buf + 7, snd_buf, snd_len);
 
-       ret = dvb_usb_generic_rw(d, state->data, 7 + snd_len,
-                                 state->data, /* rcv_len */ 64,
+       ret = dvb_usb_generic_rw(d, buf, 7 + snd_len,
+                                 buf, /* rcv_len */ 64,
                                  /* delay_ms */ 0);
        if (ret < 0)
                goto failed;
 
        /* TT USB protocol error. */
        ret = -EIO;
-       if (SYNC_BYTE_IN != state->data[0] || id != state->data[1])
+       if (SYNC_BYTE_IN != buf[0] || id != buf[1])
                goto failed;
 
        /* I2C device didn't respond as expected. */
        ret = -EREMOTEIO;
-       if (state->data[5] < snd_len || state->data[6] < rcv_len)
+       if (buf[5] < snd_len || buf[6] < rcv_len)
                goto failed;
 
-       memcpy(rcv_buf, state->data + 7, rcv_len);
-       mutex_unlock(&state->ca_mutex);
+       memcpy(rcv_buf, buf + 7, rcv_len);
 
+       kfree(buf);
        return rcv_len;
 
 failed:
        err("I2C error %d; %02X %02X  %02X %02X %02X -> %*ph",
             ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len,
-            7, state->data);
+            7, buf);
 
-       mutex_unlock(&state->ca_mutex);
+       kfree(buf);
        return ret;
 }
 
@@ -505,7 +511,7 @@ static u32 pctv452e_i2c_func(struct i2c_adapter *adapter)
 static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
 {
        struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
-       u8 *rx;
+       u8 *b0, *rx;
        int ret;
 
        info("%s: %d\n", __func__, i);
@@ -516,11 +522,12 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
        if (state->initialized)
                return 0;
 
-       rx = kmalloc(PCTV_ANSWER_LEN, GFP_KERNEL);
-       if (!rx)
+       b0 = kmalloc(5 + PCTV_ANSWER_LEN, GFP_KERNEL);
+       if (!b0)
                return -ENOMEM;
 
-       mutex_lock(&state->ca_mutex);
+       rx = b0 + 5;
+
        /* hmm where shoud this should go? */
        ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE);
        if (ret != 0)
@@ -528,66 +535,70 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
                        __func__, ret);
 
        /* this is a one-time initialization, don't know where to put it */
-       state->data[0] = 0xaa;
-       state->data[1] = state->c++;
-       state->data[2] = PCTV_CMD_RESET;
-       state->data[3] = 1;
-       state->data[4] = 0;
+       b0[0] = 0xaa;
+       b0[1] = state->c++;
+       b0[2] = PCTV_CMD_RESET;
+       b0[3] = 1;
+       b0[4] = 0;
        /* reset board */
-       ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0);
+       ret = dvb_usb_generic_rw(d, b0, 5, rx, PCTV_ANSWER_LEN, 0);
        if (ret)
                goto ret;
 
-       state->data[1] = state->c++;
-       state->data[4] = 1;
+       b0[1] = state->c++;
+       b0[4] = 1;
        /* reset board (again?) */
-       ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0);
+       ret = dvb_usb_generic_rw(d, b0, 5, rx, PCTV_ANSWER_LEN, 0);
        if (ret)
                goto ret;
 
        state->initialized = 1;
 
 ret:
-       mutex_unlock(&state->ca_mutex);
-       kfree(rx);
+       kfree(b0);
        return ret;
 }
 
 static int pctv452e_rc_query(struct dvb_usb_device *d)
 {
        struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+       u8 *b, *rx;
        int ret, i;
        u8 id;
 
-       mutex_lock(&state->ca_mutex);
+       b = kmalloc(CMD_BUFFER_SIZE + PCTV_ANSWER_LEN, GFP_KERNEL);
+       if (!b)
+               return -ENOMEM;
+
+       rx = b + CMD_BUFFER_SIZE;
+
        id = state->c++;
 
        /* prepare command header  */
-       state->data[0] = SYNC_BYTE_OUT;
-       state->data[1] = id;
-       state->data[2] = PCTV_CMD_IR;
-       state->data[3] = 0;
+       b[0] = SYNC_BYTE_OUT;
+       b[1] = id;
+       b[2] = PCTV_CMD_IR;
+       b[3] = 0;
 
        /* send ir request */
-       ret = dvb_usb_generic_rw(d, state->data, 4,
-                                state->data, PCTV_ANSWER_LEN, 0);
+       ret = dvb_usb_generic_rw(d, b, 4, rx, PCTV_ANSWER_LEN, 0);
        if (ret != 0)
                goto ret;
 
        if (debug > 3) {
-               info("%s: read: %2d: %*ph: ", __func__, ret, 3, state->data);
-               for (i = 0; (i < state->data[3]) && ((i + 3) < PCTV_ANSWER_LEN); i++)
-                       info(" %02x", state->data[i + 3]);
+               info("%s: read: %2d: %*ph: ", __func__, ret, 3, rx);
+               for (i = 0; (i < rx[3]) && ((i+3) < PCTV_ANSWER_LEN); i++)
+                       info(" %02x", rx[i+3]);
 
                info("\n");
        }
 
-       if ((state->data[3] == 9) &&  (state->data[12] & 0x01)) {
+       if ((rx[3] == 9) &&  (rx[12] & 0x01)) {
                /* got a "press" event */
-               state->last_rc_key = RC_SCANCODE_RC5(state->data[7], state->data[6]);
+               state->last_rc_key = RC_SCANCODE_RC5(rx[7], rx[6]);
                if (debug > 2)
                        info("%s: cmd=0x%02x sys=0x%02x\n",
-                               __func__, state->data[6], state->data[7]);
+                               __func__, rx[6], rx[7]);
 
                rc_keydown(d->rc_dev, RC_TYPE_RC5, state->last_rc_key, 0);
        } else if (state->last_rc_key) {
@@ -595,7 +606,7 @@ static int pctv452e_rc_query(struct dvb_usb_device *d)
                state->last_rc_key = 0;
        }
 ret:
-       mutex_unlock(&state->ca_mutex);
+       kfree(b);
        return ret;
 }
 
index a4dcaec31d02c06d82cddd81b0a47a4eb895608a..8c1f926567ec04425f36fd9fd68d80cf37df98a4 100644 (file)
@@ -218,22 +218,30 @@ static int smsusb_start_streaming(struct smsusb_device_t *dev)
 static int smsusb_sendrequest(void *context, void *buffer, size_t size)
 {
        struct smsusb_device_t *dev = (struct smsusb_device_t *) context;
-       struct sms_msg_hdr *phdr = (struct sms_msg_hdr *) buffer;
-       int dummy;
+       struct sms_msg_hdr *phdr;
+       int dummy, ret;
 
        if (dev->state != SMSUSB_ACTIVE) {
                pr_debug("Device not active yet\n");
                return -ENOENT;
        }
 
+       phdr = kmalloc(size, GFP_KERNEL);
+       if (!phdr)
+               return -ENOMEM;
+       memcpy(phdr, buffer, size);
+
        pr_debug("sending %s(%d) size: %d\n",
                  smscore_translate_msg(phdr->msg_type), phdr->msg_type,
                  phdr->msg_length);
 
        smsendian_handle_tx_message((struct sms_msg_data *) phdr);
-       smsendian_handle_message_header((struct sms_msg_hdr *)buffer);
-       return usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2),
-                           buffer, size, &dummy, 1000);
+       smsendian_handle_message_header((struct sms_msg_hdr *)phdr);
+       ret = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2),
+                           phdr, size, &dummy, 1000);
+
+       kfree(phdr);
+       return ret;
 }
 
 static char *smsusb1_fw_lkup[] = {
index a0547dbf980645104d862fdc39e2ad740107ad15..76382c858c35435b98e061a7dda49d5dacad9585 100644 (file)
@@ -330,7 +330,7 @@ static int h_memstick_read_dev_id(struct memstick_dev *card,
        struct ms_id_register id_reg;
 
        if (!(*mrq)) {
-               memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, NULL,
+               memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, &id_reg,
                                  sizeof(struct ms_id_register));
                *mrq = &card->current_mrq;
                return 0;
index 1ef7575547e69d715972685fa46bdf10eb91f619..be42957a78e16010837ccbaa9cb7912d37163825 100644 (file)
@@ -56,6 +56,7 @@
  *     document number TBD : Wildcat Point-LP
  *     document number TBD : 9 Series
  *     document number TBD : Lewisburg
+ *     document number TBD : Apollo Lake SoC
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #define ACPIBASE_GCS_OFF       0x3410
 #define ACPIBASE_GCS_END       0x3414
 
+#define SPIBASE_BYT            0x54
+#define SPIBASE_BYT_SZ         512
+#define SPIBASE_BYT_EN         BIT(1)
+
+#define SPIBASE_LPT            0x3800
+#define SPIBASE_LPT_SZ         512
+#define BCR                    0xdc
+#define BCR_WPD                        BIT(0)
+
+#define SPIBASE_APL_SZ         4096
+
 #define GPIOBASE_ICH0          0x58
 #define GPIOCTRL_ICH0          0x5C
 #define GPIOBASE_ICH6          0x48
@@ -133,6 +145,12 @@ static struct resource gpio_ich_res[] = {
        },
 };
 
+static struct resource intel_spi_res[] = {
+       {
+               .flags = IORESOURCE_MEM,
+       }
+};
+
 static struct mfd_cell lpc_ich_wdt_cell = {
        .name = "iTCO_wdt",
        .num_resources = ARRAY_SIZE(wdt_ich_res),
@@ -147,6 +165,14 @@ static struct mfd_cell lpc_ich_gpio_cell = {
        .ignore_resource_conflicts = true,
 };
 
+
+static struct mfd_cell lpc_ich_spi_cell = {
+       .name = "intel-spi",
+       .num_resources = ARRAY_SIZE(intel_spi_res),
+       .resources = intel_spi_res,
+       .ignore_resource_conflicts = true,
+};
+
 /* chipset related info */
 enum lpc_chipsets {
        LPC_ICH = 0,    /* ICH */
@@ -216,6 +242,7 @@ enum lpc_chipsets {
        LPC_BRASWELL,   /* Braswell SoC */
        LPC_LEWISBURG,  /* Lewisburg */
        LPC_9S,         /* 9 Series */
+       LPC_APL,        /* Apollo Lake SoC */
 };
 
 static struct lpc_ich_info lpc_chipset_info[] = {
@@ -494,10 +521,12 @@ static struct lpc_ich_info lpc_chipset_info[] = {
                .name = "Lynx Point",
                .iTCO_version = 2,
                .gpio_version = ICH_V5_GPIO,
+               .spi_type = INTEL_SPI_LPT,
        },
        [LPC_LPT_LP] = {
                .name = "Lynx Point_LP",
                .iTCO_version = 2,
+               .spi_type = INTEL_SPI_LPT,
        },
        [LPC_WBG] = {
                .name = "Wellsburg",
@@ -511,6 +540,7 @@ static struct lpc_ich_info lpc_chipset_info[] = {
        [LPC_BAYTRAIL] = {
                .name = "Bay Trail SoC",
                .iTCO_version = 3,
+               .spi_type = INTEL_SPI_BYT,
        },
        [LPC_COLETO] = {
                .name = "Coleto Creek",
@@ -519,10 +549,12 @@ static struct lpc_ich_info lpc_chipset_info[] = {
        [LPC_WPT_LP] = {
                .name = "Wildcat Point_LP",
                .iTCO_version = 2,
+               .spi_type = INTEL_SPI_LPT,
        },
        [LPC_BRASWELL] = {
                .name = "Braswell SoC",
                .iTCO_version = 3,
+               .spi_type = INTEL_SPI_BYT,
        },
        [LPC_LEWISBURG] = {
                .name = "Lewisburg",
@@ -533,6 +565,10 @@ static struct lpc_ich_info lpc_chipset_info[] = {
                .iTCO_version = 2,
                .gpio_version = ICH_V5_GPIO,
        },
+       [LPC_APL] = {
+               .name = "Apollo Lake SoC",
+               .spi_type = INTEL_SPI_BXT,
+       },
 };
 
 /*
@@ -681,6 +717,7 @@ static const struct pci_device_id lpc_ich_ids[] = {
        { PCI_VDEVICE(INTEL, 0x3b14), LPC_3420},
        { PCI_VDEVICE(INTEL, 0x3b16), LPC_3450},
        { PCI_VDEVICE(INTEL, 0x5031), LPC_EP80579},
+       { PCI_VDEVICE(INTEL, 0x5ae8), LPC_APL},
        { PCI_VDEVICE(INTEL, 0x8c40), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c41), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c42), LPC_LPT},
@@ -1056,6 +1093,94 @@ wdt_done:
        return ret;
 }
 
+static int lpc_ich_init_spi(struct pci_dev *dev)
+{
+       struct lpc_ich_priv *priv = pci_get_drvdata(dev);
+       struct resource *res = &intel_spi_res[0];
+       struct intel_spi_boardinfo *info;
+       u32 spi_base, rcba, bcr;
+
+       info = devm_kzalloc(&dev->dev, sizeof(*info), GFP_KERNEL);
+       if (!info)
+               return -ENOMEM;
+
+       info->type = lpc_chipset_info[priv->chipset].spi_type;
+
+       switch (info->type) {
+       case INTEL_SPI_BYT:
+               pci_read_config_dword(dev, SPIBASE_BYT, &spi_base);
+               if (spi_base & SPIBASE_BYT_EN) {
+                       res->start = spi_base & ~(SPIBASE_BYT_SZ - 1);
+                       res->end = res->start + SPIBASE_BYT_SZ - 1;
+               }
+               break;
+
+       case INTEL_SPI_LPT:
+               pci_read_config_dword(dev, RCBABASE, &rcba);
+               if (rcba & 1) {
+                       spi_base = round_down(rcba, SPIBASE_LPT_SZ);
+                       res->start = spi_base + SPIBASE_LPT;
+                       res->end = res->start + SPIBASE_LPT_SZ - 1;
+
+                       /*
+                        * Try to make the flash chip writeable now by
+                        * setting BCR_WPD. If it fails we tell the driver
+                        * that it can only read the chip.
+                        */
+                       pci_read_config_dword(dev, BCR, &bcr);
+                       if (!(bcr & BCR_WPD)) {
+                               bcr |= BCR_WPD;
+                               pci_write_config_dword(dev, BCR, bcr);
+                               pci_read_config_dword(dev, BCR, &bcr);
+                       }
+                       info->writeable = !!(bcr & BCR_WPD);
+               }
+               break;
+
+       case INTEL_SPI_BXT: {
+               unsigned int p2sb = PCI_DEVFN(13, 0);
+               unsigned int spi = PCI_DEVFN(13, 2);
+               struct pci_bus *bus = dev->bus;
+
+               /*
+                * The P2SB is hidden by BIOS and we need to unhide it in
+                * order to read BAR of the SPI flash device. Once that is
+                * done we hide it again.
+                */
+               pci_bus_write_config_byte(bus, p2sb, 0xe1, 0x0);
+               pci_bus_read_config_dword(bus, spi, PCI_BASE_ADDRESS_0,
+                                         &spi_base);
+               if (spi_base != ~0) {
+                       res->start = spi_base & 0xfffffff0;
+                       res->end = res->start + SPIBASE_APL_SZ - 1;
+
+                       pci_bus_read_config_dword(bus, spi, BCR, &bcr);
+                       if (!(bcr & BCR_WPD)) {
+                               bcr |= BCR_WPD;
+                               pci_bus_write_config_dword(bus, spi, BCR, bcr);
+                               pci_bus_read_config_dword(bus, spi, BCR, &bcr);
+                       }
+                       info->writeable = !!(bcr & BCR_WPD);
+               }
+
+               pci_bus_write_config_byte(bus, p2sb, 0xe1, 0x1);
+               break;
+       }
+
+       default:
+               return -EINVAL;
+       }
+
+       if (!res->start)
+               return -ENODEV;
+
+       lpc_ich_spi_cell.platform_data = info;
+       lpc_ich_spi_cell.pdata_size = sizeof(*info);
+
+       return mfd_add_devices(&dev->dev, PLATFORM_DEVID_NONE,
+                              &lpc_ich_spi_cell, 1, NULL, 0, NULL);
+}
+
 static int lpc_ich_probe(struct pci_dev *dev,
                                const struct pci_device_id *id)
 {
@@ -1099,6 +1224,12 @@ static int lpc_ich_probe(struct pci_dev *dev,
                        cell_added = true;
        }
 
+       if (lpc_chipset_info[priv->chipset].spi_type) {
+               ret = lpc_ich_init_spi(dev);
+               if (!ret)
+                       cell_added = true;
+       }
+
        /*
         * We only care if at least one or none of the cells registered
         * successfully.
index 7f1b282d7d963c65cf8a7a8bf5d7f559f668951e..cb290b8ca0c81296231f53457001c92be7c93639 100644 (file)
@@ -1396,7 +1396,7 @@ int genwqe_device_remove(struct genwqe_dev *cd)
         * application which will decrease this reference from
         * 1/unused to 0/illegal and not from 2/used 1/empty.
         */
-       rc = atomic_read(&cd->cdev_genwqe.kobj.kref.refcount);
+       rc = kref_read(&cd->cdev_genwqe.kobj.kref);
        if (rc != 1) {
                dev_err(&pci_dev->dev,
                        "[%s] err: cdev_genwqe...refcount=%d\n", __func__, rc);
index cfa1039c62e725289cfe74aebb7a36de92b22eb2..67d27be60405489580e7d794ae4263984301e98a 100644 (file)
@@ -19,8 +19,12 @@ void lkdtm_SOFTLOCKUP(void);
 void lkdtm_HARDLOCKUP(void);
 void lkdtm_SPINLOCKUP(void);
 void lkdtm_HUNG_TASK(void);
-void lkdtm_ATOMIC_UNDERFLOW(void);
-void lkdtm_ATOMIC_OVERFLOW(void);
+void lkdtm_REFCOUNT_SATURATE_INC(void);
+void lkdtm_REFCOUNT_SATURATE_ADD(void);
+void lkdtm_REFCOUNT_ZERO_DEC(void);
+void lkdtm_REFCOUNT_ZERO_INC(void);
+void lkdtm_REFCOUNT_ZERO_SUB(void);
+void lkdtm_REFCOUNT_ZERO_ADD(void);
 void lkdtm_CORRUPT_LIST_ADD(void);
 void lkdtm_CORRUPT_LIST_DEL(void);
 
index 91edd0b55e5ce99e955708d7c5425515fd7740bd..cba0837aee2ed1a2b12d2b2a1802b25b769aaba1 100644 (file)
@@ -6,6 +6,7 @@
  */
 #include "lkdtm.h"
 #include <linux/list.h>
+#include <linux/refcount.h>
 #include <linux/sched.h>
 
 struct lkdtm_list {
@@ -129,28 +130,86 @@ void lkdtm_HUNG_TASK(void)
        schedule();
 }
 
-void lkdtm_ATOMIC_UNDERFLOW(void)
+void lkdtm_REFCOUNT_SATURATE_INC(void)
 {
-       atomic_t under = ATOMIC_INIT(INT_MIN);
+       refcount_t over = REFCOUNT_INIT(UINT_MAX - 1);
 
-       pr_info("attempting good atomic increment\n");
-       atomic_inc(&under);
-       atomic_dec(&under);
+       pr_info("attempting good refcount decrement\n");
+       refcount_dec(&over);
+       refcount_inc(&over);
 
-       pr_info("attempting bad atomic underflow\n");
-       atomic_dec(&under);
+       pr_info("attempting bad refcount inc overflow\n");
+       refcount_inc(&over);
+       refcount_inc(&over);
+       if (refcount_read(&over) == UINT_MAX)
+               pr_err("Correctly stayed saturated, but no BUG?!\n");
+       else
+               pr_err("Fail: refcount wrapped\n");
+}
+
+void lkdtm_REFCOUNT_SATURATE_ADD(void)
+{
+       refcount_t over = REFCOUNT_INIT(UINT_MAX - 1);
+
+       pr_info("attempting good refcount decrement\n");
+       refcount_dec(&over);
+       refcount_inc(&over);
+
+       pr_info("attempting bad refcount add overflow\n");
+       refcount_add(2, &over);
+       if (refcount_read(&over) == UINT_MAX)
+               pr_err("Correctly stayed saturated, but no BUG?!\n");
+       else
+               pr_err("Fail: refcount wrapped\n");
+}
+
+void lkdtm_REFCOUNT_ZERO_DEC(void)
+{
+       refcount_t zero = REFCOUNT_INIT(1);
+
+       pr_info("attempting bad refcount decrement to zero\n");
+       refcount_dec(&zero);
+       if (refcount_read(&zero) == 0)
+               pr_err("Stayed at zero, but no BUG?!\n");
+       else
+               pr_err("Fail: refcount went crazy\n");
 }
 
-void lkdtm_ATOMIC_OVERFLOW(void)
+void lkdtm_REFCOUNT_ZERO_SUB(void)
 {
-       atomic_t over = ATOMIC_INIT(INT_MAX);
+       refcount_t zero = REFCOUNT_INIT(1);
+
+       pr_info("attempting bad refcount subtract past zero\n");
+       if (!refcount_sub_and_test(2, &zero))
+               pr_info("wrap attempt was noticed\n");
+       if (refcount_read(&zero) == 1)
+               pr_err("Correctly stayed above 0, but no BUG?!\n");
+       else
+               pr_err("Fail: refcount wrapped\n");
+}
 
-       pr_info("attempting good atomic decrement\n");
-       atomic_dec(&over);
-       atomic_inc(&over);
+void lkdtm_REFCOUNT_ZERO_INC(void)
+{
+       refcount_t zero = REFCOUNT_INIT(0);
 
-       pr_info("attempting bad atomic overflow\n");
-       atomic_inc(&over);
+       pr_info("attempting bad refcount increment from zero\n");
+       refcount_inc(&zero);
+       if (refcount_read(&zero) == 0)
+               pr_err("Stayed at zero, but no BUG?!\n");
+       else
+               pr_err("Fail: refcount went past zero\n");
+}
+
+void lkdtm_REFCOUNT_ZERO_ADD(void)
+{
+       refcount_t zero = REFCOUNT_INIT(0);
+
+       pr_info("attempting bad refcount addition from zero\n");
+       refcount_add(2, &zero);
+       if (refcount_read(&zero) == 0)
+               pr_err("Stayed at zero, but no BUG?!\n");
+       else
+               pr_err("Fail: refcount went past zero\n");
 }
 
 void lkdtm_CORRUPT_LIST_ADD(void)
index 7eeb71a755499f94cb576902200472145c331e25..16e4cf1109306e954455539324adf4c12ddfd110 100644 (file)
@@ -220,8 +220,12 @@ struct crashtype crashtypes[] = {
        CRASHTYPE(WRITE_RO),
        CRASHTYPE(WRITE_RO_AFTER_INIT),
        CRASHTYPE(WRITE_KERN),
-       CRASHTYPE(ATOMIC_UNDERFLOW),
-       CRASHTYPE(ATOMIC_OVERFLOW),
+       CRASHTYPE(REFCOUNT_SATURATE_INC),
+       CRASHTYPE(REFCOUNT_SATURATE_ADD),
+       CRASHTYPE(REFCOUNT_ZERO_DEC),
+       CRASHTYPE(REFCOUNT_ZERO_INC),
+       CRASHTYPE(REFCOUNT_ZERO_SUB),
+       CRASHTYPE(REFCOUNT_ZERO_ADD),
        CRASHTYPE(USERCOPY_HEAP_SIZE_TO),
        CRASHTYPE(USERCOPY_HEAP_SIZE_FROM),
        CRASHTYPE(USERCOPY_HEAP_FLAG_TO),
index c6217a4993ad4da05094978e50cc3f1acb1b1fd5..a617aa5a3ad8434119bf7cb69a19d62f4a66a320 100644 (file)
@@ -67,7 +67,7 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
                                me_cl->props.max_number_of_connections,
                                me_cl->props.max_msg_length,
                                me_cl->props.single_recv_buf,
-                               atomic_read(&me_cl->refcnt.refcount));
+                               kref_read(&me_cl->refcnt));
 
                        mei_me_cl_put(me_cl);
                }
index b61b52f9da3d88bc1903e14ffc56dd276700c814..0fccca075e2947337cc999e04438b1c1e2cc056d 100644 (file)
@@ -1706,10 +1706,10 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
                err = mmc_select_hs400(card);
                if (err)
                        goto free_card;
-       } else if (mmc_card_hs(card)) {
+       } else {
                /* Select the desired bus width optionally */
                err = mmc_select_bus_width(card);
-               if (err > 0) {
+               if (err > 0 && mmc_card_hs(card)) {
                        err = mmc_select_hs_ddr(card);
                        if (err)
                                goto free_card;
index b44306b886cb6d7a383abae4d985cd18ea1d48e3..73db08558e4dd6d100d44e04cd58649e63ee94e8 100644 (file)
@@ -3354,10 +3354,11 @@ int dw_mci_runtime_resume(struct device *dev)
 
                if (!slot)
                        continue;
-               if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
+               if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
                        dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
-                       dw_mci_setup_bus(slot, true);
-               }
+
+               /* Force setup bus to guarantee available clock output */
+               dw_mci_setup_bus(slot, true);
        }
 
        /* Now that slots are all setup, we can enable card detect */
index 01a804792f3007c1b8512b0cbdf4aea1a000cb82..b5972440c1bf606e677ad312a9a391576f859f70 100644 (file)
@@ -1023,7 +1023,12 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
                if (!host->busy_status && busy_resp &&
                    !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
                    (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {
-                       /* Unmask the busy IRQ */
+
+                       /* Clear the busy start IRQ */
+                       writel(host->variant->busy_detect_mask,
+                              host->base + MMCICLEAR);
+
+                       /* Unmask the busy end IRQ */
                        writel(readl(base + MMCIMASK0) |
                               host->variant->busy_detect_mask,
                               base + MMCIMASK0);
@@ -1038,10 +1043,14 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
 
                /*
                 * At this point we are not busy with a command, we have
-                * not received a new busy request, mask the busy IRQ and
-                * fall through to process the IRQ.
+                * not received a new busy request, clear and mask the busy
+                * end IRQ and fall through to process the IRQ.
                 */
                if (host->busy_status) {
+
+                       writel(host->variant->busy_detect_mask,
+                              host->base + MMCICLEAR);
+
                        writel(readl(base + MMCIMASK0) &
                               ~host->variant->busy_detect_mask,
                               base + MMCIMASK0);
@@ -1283,12 +1292,21 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
                }
 
                /*
-                * We intentionally clear the MCI_ST_CARDBUSY IRQ here (if it's
-                * enabled) since the HW seems to be triggering the IRQ on both
-                * edges while monitoring DAT0 for busy completion.
+                * We intentionally clear the MCI_ST_CARDBUSY IRQ (if it's
+                * enabled) in mmci_cmd_irq() function where ST Micro busy
+                * detection variant is handled. Considering the HW seems to be
+                * triggering the IRQ on both edges while monitoring DAT0 for
+                * busy completion and that same status bit is used to monitor
+                * start and end of busy detection, special care must be taken
+                * to make sure that both start and end interrupts are always
+                * cleared one after the other.
                 */
                status &= readl(host->base + MMCIMASK0);
-               writel(status, host->base + MMCICLEAR);
+               if (host->variant->busy_detect)
+                       writel(status & ~host->variant->busy_detect_mask,
+                              host->base + MMCICLEAR);
+               else
+                       writel(status, host->base + MMCICLEAR);
 
                dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
 
index 23909804ffb840d3187f21f67180a634a769425e..0def99590d162ebcfb86a16a6b9d5adf96f19cb6 100644 (file)
@@ -2733,7 +2733,8 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
                if (intmask & SDHCI_INT_RETUNE)
                        mmc_retune_needed(host->mmc);
 
-               if (intmask & SDHCI_INT_CARD_INT) {
+               if ((intmask & SDHCI_INT_CARD_INT) &&
+                   (host->ier & SDHCI_INT_CARD_INT)) {
                        sdhci_enable_sdio_irq_nolock(host, false);
                        host->thread_isr |= SDHCI_INT_CARD_INT;
                        result = IRQ_WAKE_THREAD;
index 283ff7e17a0febd24de519fdd1991ce351d214e7..d10fa6c8f074648e0a7ec5e05562e15f4695b849 100644 (file)
@@ -9,6 +9,7 @@
  *
  */
 
+#include <linux/bcm47xx_nvram.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
@@ -83,6 +84,91 @@ out_default:
        return "rootfs";
 }
 
+static int bcm47xxpart_parse_trx(struct mtd_info *master,
+                                struct mtd_partition *trx,
+                                struct mtd_partition *parts,
+                                size_t parts_len)
+{
+       struct trx_header header;
+       size_t bytes_read;
+       int curr_part = 0;
+       int i, err;
+
+       if (parts_len < 3) {
+               pr_warn("No enough space to add TRX partitions!\n");
+               return -ENOMEM;
+       }
+
+       err = mtd_read(master, trx->offset, sizeof(header), &bytes_read,
+                      (uint8_t *)&header);
+       if (err && !mtd_is_bitflip(err)) {
+               pr_err("mtd_read error while reading TRX header: %d\n", err);
+               return err;
+       }
+
+       i = 0;
+
+       /* We have LZMA loader if offset[2] points to sth */
+       if (header.offset[2]) {
+               bcm47xxpart_add_part(&parts[curr_part++], "loader",
+                                    trx->offset + header.offset[i], 0);
+               i++;
+       }
+
+       if (header.offset[i]) {
+               bcm47xxpart_add_part(&parts[curr_part++], "linux",
+                                    trx->offset + header.offset[i], 0);
+               i++;
+       }
+
+       if (header.offset[i]) {
+               size_t offset = trx->offset + header.offset[i];
+               const char *name = bcm47xxpart_trx_data_part_name(master,
+                                                                 offset);
+
+               bcm47xxpart_add_part(&parts[curr_part++], name, offset, 0);
+               i++;
+       }
+
+       /*
+        * Assume that every partition ends at the beginning of the one it is
+        * followed by.
+        */
+       for (i = 0; i < curr_part; i++) {
+               u64 next_part_offset = (i < curr_part - 1) ?
+                                       parts[i + 1].offset :
+                                       trx->offset + trx->size;
+
+               parts[i].size = next_part_offset - parts[i].offset;
+       }
+
+       return curr_part;
+}
+
+/**
+ * bcm47xxpart_bootpartition - gets index of TRX partition used by bootloader
+ *
+ * Some devices may have more than one TRX partition. In such case one of them
+ * is the main one and another a failsafe one. Bootloader may fallback to the
+ * failsafe firmware if it detects corruption of the main image.
+ *
+ * This function provides info about currently used TRX partition. It's the one
+ * containing kernel started by the bootloader.
+ */
+static int bcm47xxpart_bootpartition(void)
+{
+       char buf[4];
+       int bootpartition;
+
+       /* Check CFE environment variable */
+       if (bcm47xx_nvram_getenv("bootpartition", buf, sizeof(buf)) > 0) {
+               if (!kstrtoint(buf, 0, &bootpartition))
+                       return bootpartition;
+       }
+
+       return 0;
+}
+
 static int bcm47xxpart_parse(struct mtd_info *master,
                             const struct mtd_partition **pparts,
                             struct mtd_part_parser_data *data)
@@ -93,9 +179,8 @@ static int bcm47xxpart_parse(struct mtd_info *master,
        size_t bytes_read;
        uint32_t offset;
        uint32_t blocksize = master->erasesize;
-       struct trx_header *trx;
-       int trx_part = -1;
-       int last_trx_part = -1;
+       int trx_parts[2]; /* Array with indexes of TRX partitions */
+       int trx_num = 0; /* Number of found TRX partitions */
        int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, };
        int err;
 
@@ -182,54 +267,18 @@ static int bcm47xxpart_parse(struct mtd_info *master,
 
                /* TRX */
                if (buf[0x000 / 4] == TRX_MAGIC) {
-                       if (BCM47XXPART_MAX_PARTS - curr_part < 4) {
-                               pr_warn("Not enough partitions left to register trx, scanning stopped!\n");
-                               break;
-                       }
-
-                       trx = (struct trx_header *)buf;
+                       struct trx_header *trx;
 
-                       trx_part = curr_part;
+                       if (trx_num >= ARRAY_SIZE(trx_parts))
+                               pr_warn("No enough space to store another TRX found at 0x%X\n",
+                                       offset);
+                       else
+                               trx_parts[trx_num++] = curr_part;
                        bcm47xxpart_add_part(&parts[curr_part++], "firmware",
                                             offset, 0);
 
-                       i = 0;
-                       /* We have LZMA loader if offset[2] points to sth */
-                       if (trx->offset[2]) {
-                               bcm47xxpart_add_part(&parts[curr_part++],
-                                                    "loader",
-                                                    offset + trx->offset[i],
-                                                    0);
-                               i++;
-                       }
-
-                       if (trx->offset[i]) {
-                               bcm47xxpart_add_part(&parts[curr_part++],
-                                                    "linux",
-                                                    offset + trx->offset[i],
-                                                    0);
-                               i++;
-                       }
-
-                       /*
-                        * Pure rootfs size is known and can be calculated as:
-                        * trx->length - trx->offset[i]. We don't fill it as
-                        * we want to have jffs2 (overlay) in the same mtd.
-                        */
-                       if (trx->offset[i]) {
-                               const char *name;
-
-                               name = bcm47xxpart_trx_data_part_name(master, offset + trx->offset[i]);
-                               bcm47xxpart_add_part(&parts[curr_part++],
-                                                    name,
-                                                    offset + trx->offset[i],
-                                                    0);
-                               i++;
-                       }
-
-                       last_trx_part = curr_part - 1;
-
                        /* Jump to the end of TRX */
+                       trx = (struct trx_header *)buf;
                        offset = roundup(offset + trx->length, blocksize);
                        /* Next loop iteration will increase the offset */
                        offset -= blocksize;
@@ -307,9 +356,23 @@ static int bcm47xxpart_parse(struct mtd_info *master,
                                       parts[i + 1].offset : master->size;
 
                parts[i].size = next_part_offset - parts[i].offset;
-               if (i == last_trx_part && trx_part >= 0)
-                       parts[trx_part].size = next_part_offset -
-                                              parts[trx_part].offset;
+       }
+
+       /* If there was TRX parse it now */
+       for (i = 0; i < trx_num; i++) {
+               struct mtd_partition *trx = &parts[trx_parts[i]];
+
+               if (i == bcm47xxpart_bootpartition()) {
+                       int num_parts;
+
+                       num_parts = bcm47xxpart_parse_trx(master, trx,
+                                                         parts + curr_part,
+                                                         BCM47XXPART_MAX_PARTS - curr_part);
+                       if (num_parts > 0)
+                               curr_part += num_parts;
+               } else {
+                       trx->name = "failsafe";
+               }
        }
 
        *pparts = parts;
index 514be04c0b6cc64962c1c4a4b2947346c5fa05dd..e2bd81817df44b89bc8ed2b04d074949496785dc 100644 (file)
@@ -105,15 +105,33 @@ static int bcm47xxsflash_read(struct mtd_info *mtd, loff_t from, size_t len,
                              size_t *retlen, u_char *buf)
 {
        struct bcm47xxsflash *b47s = mtd->priv;
+       size_t orig_len = len;
 
        /* Check address range */
        if ((from + len) > mtd->size)
                return -EINVAL;
 
-       memcpy_fromio(buf, b47s->window + from, len);
-       *retlen = len;
+       /* Read as much as possible using fast MMIO window */
+       if (from < BCM47XXSFLASH_WINDOW_SZ) {
+               size_t memcpy_len;
 
-       return len;
+               memcpy_len = min(len, (size_t)(BCM47XXSFLASH_WINDOW_SZ - from));
+               memcpy_fromio(buf, b47s->window + from, memcpy_len);
+               from += memcpy_len;
+               len -= memcpy_len;
+               buf += memcpy_len;
+       }
+
+       /* Use indirect access for content out of the window */
+       for (; len; len--) {
+               b47s->cc_write(b47s, BCMA_CC_FLASHADDR, from++);
+               bcm47xxsflash_cmd(b47s, OPCODE_ST_READ4B);
+               *buf++ = b47s->cc_read(b47s, BCMA_CC_FLASHDATA);
+       }
+
+       *retlen = orig_len;
+
+       return orig_len;
 }
 
 static int bcm47xxsflash_write_st(struct mtd_info *mtd, u32 offset, size_t len,
@@ -284,7 +302,6 @@ static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
        b47s = devm_kzalloc(dev, sizeof(*b47s), GFP_KERNEL);
        if (!b47s)
                return -ENOMEM;
-       sflash->priv = b47s;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
@@ -334,6 +351,8 @@ static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
        b47s->size = sflash->size;
        bcm47xxsflash_fill_mtd(b47s, &pdev->dev);
 
+       platform_set_drvdata(pdev, b47s);
+
        err = mtd_device_parse_register(&b47s->mtd, probes, NULL, NULL, 0);
        if (err) {
                pr_err("Failed to register MTD device: %d\n", err);
@@ -349,8 +368,7 @@ static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
 
 static int bcm47xxsflash_bcma_remove(struct platform_device *pdev)
 {
-       struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev);
-       struct bcm47xxsflash *b47s = sflash->priv;
+       struct bcm47xxsflash *b47s = platform_get_drvdata(pdev);
 
        mtd_device_unregister(&b47s->mtd);
        iounmap(b47s->window);
index 1564b62b412e3c071f5f3062e2e992296d531f91..b2d7b38f75fdf6acb177c1e5ddde6126cbe80c57 100644 (file)
@@ -3,6 +3,8 @@
 
 #include <linux/mtd/mtd.h>
 
+#define BCM47XXSFLASH_WINDOW_SZ                        SZ_16M
+
 /* Used for ST flashes only. */
 #define OPCODE_ST_WREN         0x0006          /* Write Enable */
 #define OPCODE_ST_WRDIS                0x0004          /* Write Disable */
@@ -16,6 +18,7 @@
 #define OPCODE_ST_RES          0x03ab          /* Read Electronic Signature */
 #define OPCODE_ST_CSA          0x1000          /* Keep chip select asserted */
 #define OPCODE_ST_SSE          0x0220          /* Sub-sector Erase */
+#define OPCODE_ST_READ4B       0x6313          /* Read Data Bytes in 4Byte addressing mode */
 
 /* Used for Atmel flashes only. */
 #define OPCODE_AT_READ                         0x07e8
index 9cf7fcd280340ea2b5f72de499b7bba7e6893f34..c4df3b1bded0bac4acf44bbbe8bb4430c073f77a 100644 (file)
@@ -172,7 +172,8 @@ static ssize_t m25p80_read(struct spi_nor *nor, loff_t from, size_t len,
 
        t[1].rx_buf = buf;
        t[1].rx_nbits = m25p80_rx_nbits(nor);
-       t[1].len = min(len, spi_max_transfer_size(spi));
+       t[1].len = min3(len, spi_max_transfer_size(spi),
+                       spi_max_message_size(spi) - t[0].len);
        spi_message_add_tail(&t[1], &m);
 
        ret = spi_sync(spi, &m);
@@ -288,7 +289,6 @@ static const struct spi_device_id m25p_ids[] = {
         * should be kept for backward compatibility.
         */
        {"at25df321a"}, {"at25df641"},  {"at26df081a"},
-       {"mr25h256"},
        {"mx25l4005a"}, {"mx25l1606e"}, {"mx25l6405d"}, {"mx25l12805d"},
        {"mx25l25635e"},{"mx66l51235l"},
        {"n25q064"},    {"n25q128a11"}, {"n25q128a13"}, {"n25q512a"},
@@ -305,6 +305,11 @@ static const struct spi_device_id m25p_ids[] = {
        {"m25p40-nonjedec"},    {"m25p80-nonjedec"},    {"m25p16-nonjedec"},
        {"m25p32-nonjedec"},    {"m25p64-nonjedec"},    {"m25p128-nonjedec"},
 
+       /* Everspin MRAMs (non-JEDEC) */
+       { "mr25h256" }, /* 256 Kib, 40 MHz */
+       { "mr25h10" },  /*   1 Mib, 40 MHz */
+       { "mr25h40" },  /*   4 Mib, 40 MHz */
+
        { },
 };
 MODULE_DEVICE_TABLE(spi, m25p_ids);
index f59a125295d01ddfafec5c596b846e9ea49dd437..8b81e15105dd4a6cc34b90ed8f50b08aa41ac9fa 100644 (file)
 #define SPINOR_OP_RDVCR                0x85
 
 /* JEDEC Standard - Serial Flash Discoverable Parmeters (SFDP) Commands */
-#define SPINOR_OP_READ_1_2_2   0xbb    /* DUAL I/O READ */
-#define SPINOR_OP_READ_1_4_4   0xeb    /* QUAD I/O READ */
-
 #define SPINOR_OP_WRITE                0x02    /* PAGE PROGRAM */
 #define SPINOR_OP_WRITE_1_1_2  0xa2    /* DUAL INPUT PROGRAM */
 #define SPINOR_OP_WRITE_1_2_2  0xd2    /* DUAL INPUT EXT PROGRAM */
 #define SPINOR_OP_WRITE_1_1_4  0x32    /* QUAD INPUT PROGRAM */
 #define SPINOR_OP_WRITE_1_4_4  0x12    /* QUAD INPUT EXT PROGRAM */
 
-/* READ commands with 32-bit addressing */
-#define SPINOR_OP_READ4_1_2_2  0xbc
-#define SPINOR_OP_READ4_1_4_4  0xec
-
 /* Configuration flags */
 #define FLASH_FLAG_SINGLE      0x000000ff
 #define FLASH_FLAG_READ_WRITE  0x00000001
index 5454b411358920f54e79c5f329d749496dcd965d..804313a33f2bec433010350f47ef063313354f94 100644 (file)
@@ -507,13 +507,13 @@ static struct seq_rw_config n25q_read3_configs[] = {
  *     - 'FAST' variants configured for 8 dummy cycles (see note above.)
  */
 static struct seq_rw_config n25q_read4_configs[] = {
-       {FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ4_1_4_4,  0, 4, 4, 0x00, 0, 8},
-       {FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ4_1_1_4,  0, 1, 4, 0x00, 0, 8},
-       {FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ4_1_2_2,  0, 2, 2, 0x00, 0, 8},
-       {FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ4_1_1_2,  0, 1, 2, 0x00, 0, 8},
-       {FLASH_FLAG_READ_FAST,  SPINOR_OP_READ4_FAST,   0, 1, 1, 0x00, 0, 8},
-       {FLASH_FLAG_READ_WRITE, SPINOR_OP_READ4,        0, 1, 1, 0x00, 0, 0},
-       {0x00,                  0,                      0, 0, 0, 0x00, 0, 0},
+       {FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B, 0, 4, 4, 0x00, 0, 8},
+       {FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B, 0, 1, 4, 0x00, 0, 8},
+       {FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B, 0, 2, 2, 0x00, 0, 8},
+       {FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B, 0, 1, 2, 0x00, 0, 8},
+       {FLASH_FLAG_READ_FAST,  SPINOR_OP_READ_FAST_4B,  0, 1, 1, 0x00, 0, 8},
+       {FLASH_FLAG_READ_WRITE, SPINOR_OP_READ_4B,       0, 1, 1, 0x00, 0, 0},
+       {0x00,                  0,                       0, 0, 0, 0x00, 0, 0},
 };
 
 /*
@@ -553,13 +553,13 @@ static int stfsm_mx25_en_32bit_addr_seq(struct stfsm_seq *seq)
  * entering a state that is incompatible with the SPIBoot Controller.
  */
 static struct seq_rw_config stfsm_s25fl_read4_configs[] = {
-       {FLASH_FLAG_READ_1_4_4,  SPINOR_OP_READ4_1_4_4,  0, 4, 4, 0x00, 2, 4},
-       {FLASH_FLAG_READ_1_1_4,  SPINOR_OP_READ4_1_1_4,  0, 1, 4, 0x00, 0, 8},
-       {FLASH_FLAG_READ_1_2_2,  SPINOR_OP_READ4_1_2_2,  0, 2, 2, 0x00, 4, 0},
-       {FLASH_FLAG_READ_1_1_2,  SPINOR_OP_READ4_1_1_2,  0, 1, 2, 0x00, 0, 8},
-       {FLASH_FLAG_READ_FAST,   SPINOR_OP_READ4_FAST,   0, 1, 1, 0x00, 0, 8},
-       {FLASH_FLAG_READ_WRITE,  SPINOR_OP_READ4,        0, 1, 1, 0x00, 0, 0},
-       {0x00,                   0,                      0, 0, 0, 0x00, 0, 0},
+       {FLASH_FLAG_READ_1_4_4,  SPINOR_OP_READ_1_4_4_4B,  0, 4, 4, 0x00, 2, 4},
+       {FLASH_FLAG_READ_1_1_4,  SPINOR_OP_READ_1_1_4_4B,  0, 1, 4, 0x00, 0, 8},
+       {FLASH_FLAG_READ_1_2_2,  SPINOR_OP_READ_1_2_2_4B,  0, 2, 2, 0x00, 4, 0},
+       {FLASH_FLAG_READ_1_1_2,  SPINOR_OP_READ_1_1_2_4B,  0, 1, 2, 0x00, 0, 8},
+       {FLASH_FLAG_READ_FAST,   SPINOR_OP_READ_FAST_4B,   0, 1, 1, 0x00, 0, 8},
+       {FLASH_FLAG_READ_WRITE,  SPINOR_OP_READ_4B,        0, 1, 1, 0x00, 0, 0},
+       {0x00,                   0,                        0, 0, 0, 0x00, 0, 0},
 };
 
 static struct seq_rw_config stfsm_s25fl_write4_configs[] = {
index 5bcc896a48c3372ad192ae403f49268eb8f49c44..542fdf8e81faf7a7152b5cb8312ed4721645a86d 100644 (file)
@@ -75,7 +75,7 @@ config MTD_PHYSMAP_OF
          taken from OF device tree.
 
 config MTD_PHYSMAP_OF_VERSATILE
-       bool "Support ARM Versatile physmap OF"
+       bool "ARM Versatile OF-based physical memory map handling"
        depends on MTD_PHYSMAP_OF
        depends on MFD_SYSCON
        default y if (ARCH_INTEGRATOR || ARCH_VERSATILE || ARCH_REALVIEW)
@@ -84,6 +84,16 @@ config MTD_PHYSMAP_OF_VERSATILE
          platforms, basically to add a VPP (write protection) callback so
          the flash can be taken out of write protection.
 
+config MTD_PHYSMAP_OF_GEMINI
+       bool "Cortina Gemini OF-based physical memory map handling"
+       depends on MTD_PHYSMAP_OF
+       depends on MFD_SYSCON
+       default ARCH_GEMINI
+       help
+         This provides some extra DT physmap parsing for the Gemini
+         platforms, some detection and setting up parallel mode on the
+         external interface.
+
 config MTD_PMC_MSP_EVM
        tristate "CFI Flash device mapped on PMC-Sierra MSP"
        depends on PMC_MSP && MTD_CFI
index 644f7d36d35dc942b83cb151ad11c3aea6eabc53..aef1846b4de24321ca7cc65122d5cb14c1a00b6e 100644 (file)
@@ -17,10 +17,13 @@ obj-$(CONFIG_MTD_CK804XROM) += ck804xrom.o
 obj-$(CONFIG_MTD_TSUNAMI)      += tsunami_flash.o
 obj-$(CONFIG_MTD_PXA2XX)       += pxa2xx-flash.o
 obj-$(CONFIG_MTD_PHYSMAP)      += physmap.o
-obj-$(CONFIG_MTD_PHYSMAP_OF)   += physmap_of.o
 ifdef CONFIG_MTD_PHYSMAP_OF_VERSATILE
-obj-$(CONFIG_MTD_PHYSMAP_OF)   += physmap_of_versatile.o
+physmap_of-objs += physmap_of_versatile.o
+endif
+ifdef CONFIG_MTD_PHYSMAP_OF_GEMINI
+physmap_of-objs += physmap_of_gemini.o
 endif
+obj-$(CONFIG_MTD_PHYSMAP_OF)   += physmap_of.o
 obj-$(CONFIG_MTD_PISMO)                += pismo.o
 obj-$(CONFIG_MTD_PMC_MSP_EVM)   += pmcmsp-flash.o
 obj-$(CONFIG_MTD_PCMCIA)       += pcmciamtd.o
index e17d02ae03f0966419a8e42b5192967a98883b39..976d42f63aef6b45bc5489c79dd5283de238454e 100644 (file)
@@ -57,10 +57,12 @@ static void ichxrom_cleanup(struct ichxrom_window *window)
 {
        struct ichxrom_map_info *map, *scratch;
        u16 word;
+       int ret;
 
        /* Disable writes through the rom window */
-       pci_read_config_word(window->pdev, BIOS_CNTL, &word);
-       pci_write_config_word(window->pdev, BIOS_CNTL, word & ~1);
+       ret = pci_read_config_word(window->pdev, BIOS_CNTL, &word);
+       if (!ret)
+               pci_write_config_word(window->pdev, BIOS_CNTL, word & ~1);
        pci_dev_put(window->pdev);
 
        /* Free all of the mtd devices */
index c8febb326fa6695be04bf8bca0daa50083fe5dbb..3e33ab66eb24f2e4ddb8da5afab4691a038d03f2 100644 (file)
@@ -4,7 +4,7 @@
  *  by the Free Software Foundation.
  *
  *  Copyright (C) 2004 Liu Peng Infineon IFAP DC COM CPE
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ *  Copyright (C) 2010 John Crispin <john@phrozen.org>
  */
 
 #include <linux/err.h>
@@ -209,5 +209,5 @@ static struct platform_driver ltq_mtd_driver = {
 module_platform_driver(ltq_mtd_driver);
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
+MODULE_AUTHOR("John Crispin <john@phrozen.org>");
 MODULE_DESCRIPTION("Lantiq SoC NOR");
index 3fad35942895c0186dfa08d22ab655fc0a4ed37f..14e8909c99555bb89a84f5c7671b3985b315c883 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/slab.h>
+#include "physmap_of_gemini.h"
 #include "physmap_of_versatile.h"
 
 struct of_flash_list {
@@ -241,11 +242,13 @@ static int of_flash_probe(struct platform_device *dev)
                info->list[i].map.size = res_size;
                info->list[i].map.bankwidth = be32_to_cpup(width);
                info->list[i].map.device_node = dp;
+
+               err = of_flash_probe_gemini(dev, dp, &info->list[i].map);
+               if (err)
+                       return err;
                err = of_flash_probe_versatile(dev, dp, &info->list[i].map);
-               if (err) {
-                       dev_err(&dev->dev, "Can't probe Versatile VPP\n");
+               if (err)
                        return err;
-               }
 
                err = -ENOMEM;
                info->list[i].map.virt = ioremap(info->list[i].map.phys,
diff --git a/drivers/mtd/maps/physmap_of_gemini.c b/drivers/mtd/maps/physmap_of_gemini.c
new file mode 100644 (file)
index 0000000..9d371cd
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+ * Cortina Systems Gemini OF physmap add-on
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * This SoC has an elaborate flash control register, so we need to
+ * detect and set it up when booting on this platform.
+ */
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/mtd/map.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/bitops.h>
+#include "physmap_of_gemini.h"
+
+/*
+ * The Flash-relevant parts of the global status register
+ * These would also be relevant for a NAND driver.
+ */
+#define GLOBAL_STATUS                  0x04
+#define FLASH_TYPE_MASK                        (0x3 << 24)
+#define FLASH_TYPE_NAND_2K             (0x3 << 24)
+#define FLASH_TYPE_NAND_512            (0x2 << 24)
+#define FLASH_TYPE_PARALLEL            (0x1 << 24)
+#define FLASH_TYPE_SERIAL              (0x0 << 24)
+/* if parallel */
+#define FLASH_WIDTH_16BIT              (1 << 23)       /* else 8 bit */
+/* if serial */
+#define FLASH_ATMEL                    (1 << 23)       /* else STM */
+
+#define FLASH_SIZE_MASK                        (0x3 << 21)
+#define NAND_256M                      (0x3 << 21)     /* and more */
+#define NAND_128M                      (0x2 << 21)
+#define NAND_64M                       (0x1 << 21)
+#define NAND_32M                       (0x0 << 21)
+#define ATMEL_16M                      (0x3 << 21)     /* and more */
+#define ATMEL_8M                       (0x2 << 21)
+#define ATMEL_4M_2M                    (0x1 << 21)
+#define ATMEL_1M                       (0x0 << 21)     /* and less */
+#define STM_32M                                (1 << 22)       /* and more */
+#define STM_16M                                (0 << 22)       /* and less */
+
+#define FLASH_PARALLEL_HIGH_PIN_CNT    (1 << 20)       /* else low pin cnt */
+
+/* Miscellaneous Control Register */
+#define GLOBAL_MISC_CTRL               0x30
+#define FLASH_PADS_MASK                        0x07
+#define NAND_PADS_DISABLE              BIT(2)
+#define PFLASH_PADS_DISABLE            BIT(1)
+#define SFLASH_PADS_DISABLE            BIT(0)
+
+static const struct of_device_id syscon_match[] = {
+       { .compatible = "cortina,gemini-syscon" },
+       { },
+};
+
+int of_flash_probe_gemini(struct platform_device *pdev,
+                         struct device_node *np,
+                         struct map_info *map)
+{
+       static struct regmap *rmap;
+       struct device *dev = &pdev->dev;
+       u32 val;
+       int ret;
+
+       /* Multiplatform guard */
+       if (!of_device_is_compatible(np, "cortina,gemini-flash"))
+               return 0;
+
+       rmap = syscon_regmap_lookup_by_phandle(np, "syscon");
+       if (IS_ERR(rmap)) {
+               dev_err(dev, "no syscon\n");
+               return PTR_ERR(rmap);
+       }
+
+       ret = regmap_read(rmap, GLOBAL_STATUS, &val);
+       if (ret) {
+               dev_err(dev, "failed to read global status register\n");
+               return -ENODEV;
+       }
+       dev_dbg(dev, "global status reg: %08x\n", val);
+
+       /*
+        * It would be contradictory if a physmap flash was NOT parallel.
+        */
+       if ((val & FLASH_TYPE_MASK) != FLASH_TYPE_PARALLEL) {
+               dev_err(dev, "flash is not parallel\n");
+               return -ENODEV;
+       }
+
+       /*
+        * Complain if DT data and hardware definition is different.
+        */
+       if (val & FLASH_WIDTH_16BIT) {
+               if (map->bankwidth != 2)
+                       dev_warn(dev, "flash hardware say flash is 16 bit wide but DT says it is %d bits wide\n",
+                                map->bankwidth * 8);
+       } else {
+               if (map->bankwidth != 1)
+                       dev_warn(dev, "flash hardware say flash is 8 bit wide but DT says it is %d bits wide\n",
+                                map->bankwidth * 8);
+       }
+
+       /* Activate parallel (NOR flash) mode */
+       ret = regmap_update_bits(rmap, GLOBAL_MISC_CTRL,
+                                FLASH_PADS_MASK,
+                                SFLASH_PADS_DISABLE | NAND_PADS_DISABLE);
+       if (ret) {
+               dev_err(dev, "unable to set up physmap pads\n");
+               return -ENODEV;
+       }
+
+       dev_info(&pdev->dev, "initialized Gemini-specific physmap control\n");
+
+       return 0;
+}
diff --git a/drivers/mtd/maps/physmap_of_gemini.h b/drivers/mtd/maps/physmap_of_gemini.h
new file mode 100644 (file)
index 0000000..c675025
--- /dev/null
@@ -0,0 +1,16 @@
+#include <linux/of.h>
+#include <linux/mtd/map.h>
+
+#ifdef CONFIG_MTD_PHYSMAP_OF_GEMINI
+int of_flash_probe_gemini(struct platform_device *pdev,
+                         struct device_node *np,
+                         struct map_info *map);
+#else
+static inline
+int of_flash_probe_gemini(struct platform_device *pdev,
+                         struct device_node *np,
+                         struct map_info *map)
+{
+       return 0;
+}
+#endif
index 0f39b2a015f403f30046256815429c1c1406ec7f..8c6ccded9be8020db4688bc0654c50aa2301e00f 100644 (file)
@@ -252,4 +252,3 @@ int of_flash_probe_versatile(struct platform_device *pdev,
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(of_flash_probe_versatile);
index f9fa3fad728e5e3b0687c5affadd7cf8d0b129b0..2051f28ddac6e6312cd8b1746aa3db9f561ab1ec 100644 (file)
@@ -139,15 +139,13 @@ static int __init init_msp_flash(void)
                }
 
                msp_maps[i].bankwidth = 1;
-               msp_maps[i].name = kmalloc(7, GFP_KERNEL);
+               msp_maps[i].name = kstrndup(flash_name, 7, GFP_KERNEL);
                if (!msp_maps[i].name) {
                        iounmap(msp_maps[i].virt);
                        kfree(msp_parts[i]);
                        goto cleanup_loop;
                }
 
-               msp_maps[i].name = strncpy(msp_maps[i].name, flash_name, 7);
-
                for (j = 0; j < pcnt; j++) {
                        part_name[5] = '0' + i;
                        part_name[7] = '0' + j;
index ce5ccc573a9c11a06572e69c03d2356170b1c739..3568294d4854592454ad026fe2c26969148419fb 100644 (file)
@@ -451,7 +451,7 @@ static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
         * data. For our userspace tools it is important to dump areas
         * with ECC errors!
         * For kernel internal usage it also might return -EUCLEAN
-        * to signal the caller that a bitflip has occured and has
+        * to signal the caller that a bitflip has occurred and has
         * been corrected by the ECC algorithm.
         *
         * Note: currently the standard NAND function, nand_read_oob_std,
index 052772f7caef739714b87b4f6a41b76963cb0e02..66a9dedd10620b34aad39182ecdc2b444df360d3 100644 (file)
@@ -1128,7 +1128,7 @@ EXPORT_SYMBOL_GPL(mtd_write_oob);
  * @oobecc: OOB region struct filled with the appropriate ECC position
  *         information
  *
- * This functions return ECC section information in the OOB area. I you want
+ * This function returns ECC section information in the OOB area. If you want
  * to get all the ECC bytes information, then you should call
  * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
  *
@@ -1160,7 +1160,7 @@ EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
  * @oobfree: OOB region struct filled with the appropriate free position
  *          information
  *
- * This functions return free bytes position in the OOB area. I you want
+ * This function returns free bytes position in the OOB area. If you want
  * to get all the free bytes information, then you should call
  * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
  *
@@ -1190,7 +1190,7 @@ EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
  * @iter: iterator function. Should be either mtd_ooblayout_free or
  *       mtd_ooblayout_ecc depending on the region type you're searching for
  *
- * This functions returns the section id and oobregion information of a
+ * This function returns the section id and oobregion information of a
  * specific byte. For example, say you want to know where the 4th ECC byte is
  * stored, you'll use:
  *
index fccdd49bb96407d844e11d67a454ae015194bdea..ea5e5307f667f2a6669287377a633824bb67a4bb 100644 (file)
@@ -349,6 +349,14 @@ static const struct mtd_ooblayout_ops part_ooblayout_ops = {
        .free = part_ooblayout_free,
 };
 
+static int part_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
+{
+       struct mtd_part *part = mtd_to_part(mtd);
+
+       return part->master->_max_bad_blocks(part->master,
+                                            ofs + part->offset, len);
+}
+
 static inline void free_partition(struct mtd_part *p)
 {
        kfree(p->mtd.name);
@@ -424,6 +432,7 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
        slave->mtd.dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) ?
                                &master->dev :
                                master->dev.parent;
+       slave->mtd.dev.of_node = part->of_node;
 
        slave->mtd._read = part_read;
        slave->mtd._write = part_write;
@@ -475,6 +484,8 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
                slave->mtd._block_isbad = part_block_isbad;
        if (master->_block_markbad)
                slave->mtd._block_markbad = part_block_markbad;
+       if (master->_max_bad_blocks)
+               slave->mtd._max_bad_blocks = part_max_bad_blocks;
 
        if (master->_get_device)
                slave->mtd._get_device = part_get_device;
index 9ce5dcb4abd0f5026384ed4e0466d1ddfa44c617..6d4d5672d1d8ea0945cdaebcbf06d17f5984017b 100644 (file)
@@ -426,6 +426,7 @@ config MTD_NAND_ORION
 
 config MTD_NAND_OXNAS
        tristate "NAND Flash support for Oxford Semiconductor SoC"
+       depends on ARCH_OXNAS || COMPILE_TEST
        depends on HAS_IOMEM
        help
          This enables the NAND flash controller on Oxford Semiconductor SoCs.
@@ -535,6 +536,7 @@ config MTD_NAND_JZ4780
 
 config MTD_NAND_FSMC
        tristate "Support for NAND on ST Micros FSMC"
+       depends on OF
        depends on PLAT_SPEAR || ARCH_NOMADIK || ARCH_U8500 || MACH_U300
        help
          Enables support for NAND Flash chips on the ST Microelectronics
index 0a177b1bfe3e77600b9fc9c943f6f961d195cd4f..d1570f512f0bbad5c07c9903528c125e412c029a 100644 (file)
@@ -258,9 +258,15 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
                int bufnum = nctrl->page & priv->bufnum_mask;
                int sector = bufnum * chip->ecc.steps;
                int sector_end = sector + chip->ecc.steps - 1;
+               __be32 *eccstat_regs;
+
+               if (ctrl->version >= FSL_IFC_VERSION_2_0_0)
+                       eccstat_regs = ifc->ifc_nand.v2_nand_eccstat;
+               else
+                       eccstat_regs = ifc->ifc_nand.v1_nand_eccstat;
 
                for (i = sector / 4; i <= sector_end / 4; i++)
-                       eccstat[i] = ifc_in32(&ifc->ifc_nand.nand_eccstat[i]);
+                       eccstat[i] = ifc_in32(&eccstat_regs[i]);
 
                for (i = sector; i <= sector_end; i++) {
                        errors = check_read_ecc(mtd, ctrl, eccstat, i);
index 4924b43977ef8b1e2781dee0f980b5bf10950737..bda1e4667138ab3cc392f961f7db6da377829b03 100644 (file)
 #include <linux/mtd/partitions.h>
 #include <linux/io.h>
 #include <linux/slab.h>
-#include <linux/mtd/fsmc.h>
 #include <linux/amba/bus.h>
 #include <mtd/mtd-abi.h>
 
+#define FSMC_NAND_BW8          1
+#define FSMC_NAND_BW16         2
+
+#define FSMC_MAX_NOR_BANKS     4
+#define FSMC_MAX_NAND_BANKS    4
+
+#define FSMC_FLASH_WIDTH8      1
+#define FSMC_FLASH_WIDTH16     2
+
+/* fsmc controller registers for NOR flash */
+#define CTRL                   0x0
+       /* ctrl register definitions */
+       #define BANK_ENABLE             (1 << 0)
+       #define MUXED                   (1 << 1)
+       #define NOR_DEV                 (2 << 2)
+       #define WIDTH_8                 (0 << 4)
+       #define WIDTH_16                (1 << 4)
+       #define RSTPWRDWN               (1 << 6)
+       #define WPROT                   (1 << 7)
+       #define WRT_ENABLE              (1 << 12)
+       #define WAIT_ENB                (1 << 13)
+
+#define CTRL_TIM               0x4
+       /* ctrl_tim register definitions */
+
+#define FSMC_NOR_BANK_SZ       0x8
+#define FSMC_NOR_REG_SIZE      0x40
+
+#define FSMC_NOR_REG(base, bank, reg)          (base + \
+                                               FSMC_NOR_BANK_SZ * (bank) + \
+                                               reg)
+
+/* fsmc controller registers for NAND flash */
+#define PC                     0x00
+       /* pc register definitions */
+       #define FSMC_RESET              (1 << 0)
+       #define FSMC_WAITON             (1 << 1)
+       #define FSMC_ENABLE             (1 << 2)
+       #define FSMC_DEVTYPE_NAND       (1 << 3)
+       #define FSMC_DEVWID_8           (0 << 4)
+       #define FSMC_DEVWID_16          (1 << 4)
+       #define FSMC_ECCEN              (1 << 6)
+       #define FSMC_ECCPLEN_512        (0 << 7)
+       #define FSMC_ECCPLEN_256        (1 << 7)
+       #define FSMC_TCLR_1             (1)
+       #define FSMC_TCLR_SHIFT         (9)
+       #define FSMC_TCLR_MASK          (0xF)
+       #define FSMC_TAR_1              (1)
+       #define FSMC_TAR_SHIFT          (13)
+       #define FSMC_TAR_MASK           (0xF)
+#define STS                    0x04
+       /* sts register definitions */
+       #define FSMC_CODE_RDY           (1 << 15)
+#define COMM                   0x08
+       /* comm register definitions */
+       #define FSMC_TSET_0             0
+       #define FSMC_TSET_SHIFT         0
+       #define FSMC_TSET_MASK          0xFF
+       #define FSMC_TWAIT_6            6
+       #define FSMC_TWAIT_SHIFT        8
+       #define FSMC_TWAIT_MASK         0xFF
+       #define FSMC_THOLD_4            4
+       #define FSMC_THOLD_SHIFT        16
+       #define FSMC_THOLD_MASK         0xFF
+       #define FSMC_THIZ_1             1
+       #define FSMC_THIZ_SHIFT         24
+       #define FSMC_THIZ_MASK          0xFF
+#define ATTRIB                 0x0C
+#define IOATA                  0x10
+#define ECC1                   0x14
+#define ECC2                   0x18
+#define ECC3                   0x1C
+#define FSMC_NAND_BANK_SZ      0x20
+
+#define FSMC_NAND_REG(base, bank, reg)         (base + FSMC_NOR_REG_SIZE + \
+                                               (FSMC_NAND_BANK_SZ * (bank)) + \
+                                               reg)
+
+#define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ)
+
+struct fsmc_nand_timings {
+       uint8_t tclr;
+       uint8_t tar;
+       uint8_t thiz;
+       uint8_t thold;
+       uint8_t twait;
+       uint8_t tset;
+};
+
+enum access_mode {
+       USE_DMA_ACCESS = 1,
+       USE_WORD_ACCESS,
+};
+
+/**
+ * fsmc_nand_platform_data - platform specific NAND controller config
+ * @nand_timings: timing setup for the physical NAND interface
+ * @partitions: partition table for the platform, use a default fallback
+ * if this is NULL
+ * @nr_partitions: the number of partitions in the previous entry
+ * @options: different options for the driver
+ * @width: bus width
+ * @bank: default bank
+ * @select_bank: callback to select a certain bank, this is
+ * platform-specific. If the controller only supports one bank
+ * this may be set to NULL
+ */
+struct fsmc_nand_platform_data {
+       struct fsmc_nand_timings *nand_timings;
+       struct mtd_partition    *partitions;
+       unsigned int            nr_partitions;
+       unsigned int            options;
+       unsigned int            width;
+       unsigned int            bank;
+
+       enum access_mode        mode;
+
+       void                    (*select_bank)(uint32_t bank, uint32_t busw);
+
+       /* priv structures for dma accesses */
+       void                    *read_dma_priv;
+       void                    *write_dma_priv;
+};
+
 static int fsmc_ecc1_ooblayout_ecc(struct mtd_info *mtd, int section,
                                   struct mtd_oob_region *oobregion)
 {
@@ -714,7 +837,6 @@ static bool filter(struct dma_chan *chan, void *slave)
        return true;
 }
 
-#ifdef CONFIG_OF
 static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
                                     struct device_node *np)
 {
@@ -757,13 +879,6 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
        }
        return 0;
 }
-#else
-static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
-                                    struct device_node *np)
-{
-       return -ENOSYS;
-}
-#endif
 
 /*
  * fsmc_nand_probe - Probe function
@@ -782,19 +897,15 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
        u32 pid;
        int i;
 
-       if (np) {
-               pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
-               pdev->dev.platform_data = pdata;
-               ret = fsmc_nand_probe_config_dt(pdev, np);
-               if (ret) {
-                       dev_err(&pdev->dev, "no platform data\n");
-                       return -ENODEV;
-               }
-       }
+       pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+       if (!pdata)
+               return -ENOMEM;
 
-       if (!pdata) {
-               dev_err(&pdev->dev, "platform data is NULL\n");
-               return -EINVAL;
+       pdev->dev.platform_data = pdata;
+       ret = fsmc_nand_probe_config_dt(pdev, np);
+       if (ret) {
+               dev_err(&pdev->dev, "no platform data\n");
+               return -ENODEV;
        }
 
        /* Allocate memory for the device structure (and zero it) */
index 53bafe23ab39eeffb1c51858ac9acd19cf8ca01b..a0669a33f8fe61334a07ec058989d865f3c6ab73 100644 (file)
@@ -797,22 +797,17 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
        struct resource *rc;
        int res;
 
-       rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (rc == NULL) {
-               dev_err(&pdev->dev, "No memory resource found for device\n");
-               return -EBUSY;
-       }
-
        /* Allocate memory for the device structure (and zero it) */
        host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
        if (!host)
                return -ENOMEM;
-       host->io_base_dma = rc->start;
 
+       rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        host->io_base = devm_ioremap_resource(&pdev->dev, rc);
        if (IS_ERR(host->io_base))
                return PTR_ERR(host->io_base);
 
+       host->io_base_dma = rc->start;
        if (pdev->dev.of_node)
                host->ncfg = lpc32xx_parse_dt(&pdev->dev);
        if (!host->ncfg) {
index 6c3eed3c20941c0f9ab0ddb5f7ec86ad623a9b7e..6c517c682939db436eb040e7481682c6ae309e69 100644 (file)
@@ -1383,7 +1383,6 @@ static int mtk_nfc_probe(struct platform_device *pdev)
        nfc->regs = devm_ioremap_resource(dev, res);
        if (IS_ERR(nfc->regs)) {
                ret = PTR_ERR(nfc->regs);
-               dev_err(dev, "no nfi base\n");
                goto release_ecc;
        }
 
index ec1c28aaaf23c4bb509a9710121224e46e358dde..1492c12906f6bd3f83bedc05c14f8b7ee7d80963 100644 (file)
@@ -3262,6 +3262,42 @@ static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
        return nand_block_markbad_lowlevel(mtd, ofs);
 }
 
+/**
+ * nand_max_bad_blocks - [MTD Interface] Max number of bad blocks for an mtd
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
+ * @len: length of mtd
+ */
+static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
+{
+       struct nand_chip *chip = mtd_to_nand(mtd);
+       u32 part_start_block;
+       u32 part_end_block;
+       u32 part_start_die;
+       u32 part_end_die;
+
+       /*
+        * max_bb_per_die and blocks_per_die used to determine
+        * the maximum bad block count.
+        */
+       if (!chip->max_bb_per_die || !chip->blocks_per_die)
+               return -ENOTSUPP;
+
+       /* Get the start and end of the partition in erase blocks. */
+       part_start_block = mtd_div_by_eb(ofs, mtd);
+       part_end_block = mtd_div_by_eb(len, mtd) + part_start_block - 1;
+
+       /* Get the start and end LUNs of the partition. */
+       part_start_die = part_start_block / chip->blocks_per_die;
+       part_end_die = part_end_block / chip->blocks_per_die;
+
+       /*
+        * Look up the bad blocks per unit and multiply by the number of units
+        * that the partition spans.
+        */
+       return chip->max_bb_per_die * (part_end_die - part_start_die + 1);
+}
+
 /**
  * nand_onfi_set_features- [REPLACEABLE] set features for ONFI nand
  * @mtd: MTD device structure
@@ -3592,6 +3628,9 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
        chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
        chip->bits_per_cell = p->bits_per_cell;
 
+       chip->max_bb_per_die = le16_to_cpu(p->bb_per_lun);
+       chip->blocks_per_die = le32_to_cpu(p->blocks_per_lun);
+
        if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)
                *busw = NAND_BUSWIDTH_16;
        else
@@ -4815,6 +4854,7 @@ int nand_scan_tail(struct mtd_info *mtd)
        mtd->_block_isreserved = nand_block_isreserved;
        mtd->_block_isbad = nand_block_isbad;
        mtd->_block_markbad = nand_block_markbad;
+       mtd->_max_bad_blocks = nand_max_bad_blocks;
        mtd->writebufsize = mtd->writesize;
 
        /*
index b3a332f37e145044b7b47f999b20382648d16b3e..4a2f75b0c200a1135e64941bf24b753c356775a0 100644 (file)
@@ -185,6 +185,7 @@ struct nand_manufacturers nand_manuf_ids[] = {
        {NAND_MFR_SANDISK, "SanDisk"},
        {NAND_MFR_INTEL, "Intel"},
        {NAND_MFR_ATO, "ATO"},
+       {NAND_MFR_WINBOND, "Winbond"},
        {0x0, "Unknown"}
 };
 
index e40482a65de6683264950a0caeba55484b1ffedf..0eeeb8b889ea8af83a12126ded319e84f0389618 100644 (file)
@@ -321,6 +321,10 @@ static int sunxi_nfc_wait_events(struct sunxi_nfc *nfc, u32 events,
 
                ret = wait_for_completion_timeout(&nfc->complete,
                                                msecs_to_jiffies(timeout_ms));
+               if (!ret)
+                       ret = -ETIMEDOUT;
+               else
+                       ret = 0;
 
                writel(0, nfc->regs + NFC_REG_INT);
        } else {
@@ -518,6 +522,8 @@ static void sunxi_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
        u32 tmp;
 
        while (len > offs) {
+               bool poll = false;
+
                cnt = min(len - offs, NFC_SRAM_SIZE);
 
                ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
@@ -528,7 +534,11 @@ static void sunxi_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
                tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD;
                writel(tmp, nfc->regs + NFC_REG_CMD);
 
-               ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
+               /* Arbitrary limit for polling mode */
+               if (cnt < 64)
+                       poll = true;
+
+               ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, poll, 0);
                if (ret)
                        break;
 
@@ -551,6 +561,8 @@ static void sunxi_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
        u32 tmp;
 
        while (len > offs) {
+               bool poll = false;
+
                cnt = min(len - offs, NFC_SRAM_SIZE);
 
                ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
@@ -563,7 +575,11 @@ static void sunxi_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
                      NFC_ACCESS_DIR;
                writel(tmp, nfc->regs + NFC_REG_CMD);
 
-               ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
+               /* Arbitrary limit for polling mode */
+               if (cnt < 64)
+                       poll = true;
+
+               ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, poll, 0);
                if (ret)
                        break;
 
@@ -588,10 +604,6 @@ static void sunxi_nfc_cmd_ctrl(struct mtd_info *mtd, int dat,
        struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
        int ret;
 
-       ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
-       if (ret)
-               return;
-
        if (dat == NAND_CMD_NONE && (ctrl & NAND_NCE) &&
            !(ctrl & (NAND_CLE | NAND_ALE))) {
                u32 cmd = 0;
@@ -621,6 +633,10 @@ static void sunxi_nfc_cmd_ctrl(struct mtd_info *mtd, int dat,
                        writel(sunxi_nand->addr[1],
                               nfc->regs + NFC_REG_ADDR_HIGH);
 
+               ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+               if (ret)
+                       return;
+
                writel(cmd, nfc->regs + NFC_REG_CMD);
                sunxi_nand->addr[0] = 0;
                sunxi_nand->addr[1] = 0;
@@ -957,7 +973,7 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
        writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
               nfc->regs + NFC_REG_CMD);
 
-       ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
+       ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
        sunxi_nfc_randomizer_disable(mtd);
        if (ret)
                return ret;
@@ -1069,7 +1085,7 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
        writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD | NFC_DATA_TRANS,
               nfc->regs + NFC_REG_CMD);
 
-       ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
+       ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
        if (ret)
                dmaengine_terminate_all(nfc->dmac);
 
@@ -1189,7 +1205,7 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
               NFC_ACCESS_DIR | NFC_ECC_OP,
               nfc->regs + NFC_REG_CMD);
 
-       ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
+       ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
        sunxi_nfc_randomizer_disable(mtd);
        if (ret)
                return ret;
@@ -1428,7 +1444,7 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
               NFC_DATA_TRANS | NFC_ACCESS_DIR,
               nfc->regs + NFC_REG_CMD);
 
-       ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
+       ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
        if (ret)
                dmaengine_terminate_all(nfc->dmac);
 
index 895101a5e686457c0dc63237efcb33a79c43a559..ddee4005248c20b9c3b1e58afd74caac37f3c3b8 100644 (file)
@@ -3,7 +3,7 @@
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
  *
- *  Copyright © 2012 John Crispin <blogic@openwrt.org>
+ *  Copyright © 2012 John Crispin <john@phrozen.org>
 *  Copyright © 2016 Hauke Mehrtens <hauke@hauke-m.de>
  */
 
index ede407d6e1068826edeebcf1472ab68c12c1fb2d..4644701224931dd2860d8d332b6a1ea5adc01fa3 100644 (file)
@@ -108,6 +108,7 @@ static int parse_ofpart_partitions(struct mtd_info *master,
 
                parts[i].offset = of_read_number(reg, a_cells);
                parts[i].size = of_read_number(reg + a_cells, s_cells);
+               parts[i].of_node = pp;
 
                partname = of_get_property(pp, "label", &len);
                if (!partname)
index 4a682ee0f6325cc19d7a81a2dc857985c686b6ee..7252087ef407e40e1aaff2061ef6b7524c66749d 100644 (file)
@@ -29,6 +29,16 @@ config MTD_SPI_NOR_USE_4K_SECTORS
          Please note that some tools/drivers/filesystems may not work with
          4096 B erase size (e.g. UBIFS requires 15 KiB as a minimum).
 
+config SPI_ASPEED_SMC
+       tristate "Aspeed flash controllers in SPI mode"
+       depends on ARCH_ASPEED || COMPILE_TEST
+       depends on HAS_IOMEM && OF
+       help
+         This enables support for the Firmware Memory controller (FMC)
+         in the Aspeed AST2500/AST2400 SoCs when attached to SPI NOR chips,
+         and support for the SPI flash memory controller (SPI) for
+         the host firmware. The implementation only supports SPI NOR.
+
 config SPI_ATMEL_QUADSPI
        tristate "Atmel Quad SPI Controller"
        depends on ARCH_AT91 || (ARM && COMPILE_TEST)
@@ -40,7 +50,7 @@ config SPI_ATMEL_QUADSPI
 
 config SPI_CADENCE_QUADSPI
        tristate "Cadence Quad SPI controller"
-       depends on OF && ARM
+       depends on OF && (ARM || COMPILE_TEST)
        help
          Enable support for the Cadence Quad SPI Flash controller.
 
@@ -76,4 +86,24 @@ config SPI_NXP_SPIFI
          Flash. Enable this option if you have a device with a SPIFI
          controller and want to access the Flash as a mtd device.
 
+config SPI_INTEL_SPI
+       tristate
+
+config SPI_INTEL_SPI_PLATFORM
+       tristate "Intel PCH/PCU SPI flash platform driver" if EXPERT
+       depends on X86
+       select SPI_INTEL_SPI
+       help
+         This enables platform support for the Intel PCH/PCU SPI
+         controller in master mode. This controller is present in modern
+         Intel hardware and is used to hold BIOS and other persistent
+         settings. Using this driver it is possible to upgrade BIOS
+         directly from Linux.
+
+         Say N here unless you know what you are doing. Overwriting the
+         SPI flash may render the system unbootable.
+
+         To compile this driver as a module, choose M here: the module
+         will be called intel-spi-platform.
+
 endif # MTD_SPI_NOR
index 121695e83542ab80593042254aa73e81d5456d45..72238a793198cfd1f7a2d65ee896860981939baf 100644 (file)
@@ -1,7 +1,10 @@
 obj-$(CONFIG_MTD_SPI_NOR)      += spi-nor.o
+obj-$(CONFIG_SPI_ASPEED_SMC)   += aspeed-smc.o
 obj-$(CONFIG_SPI_ATMEL_QUADSPI)        += atmel-quadspi.o
 obj-$(CONFIG_SPI_CADENCE_QUADSPI)      += cadence-quadspi.o
 obj-$(CONFIG_SPI_FSL_QUADSPI)  += fsl-quadspi.o
 obj-$(CONFIG_SPI_HISI_SFC)     += hisi-sfc.o
 obj-$(CONFIG_MTD_MT81xx_NOR)    += mtk-quadspi.o
 obj-$(CONFIG_SPI_NXP_SPIFI)    += nxp-spifi.o
+obj-$(CONFIG_SPI_INTEL_SPI)    += intel-spi.o
+obj-$(CONFIG_SPI_INTEL_SPI_PLATFORM)   += intel-spi-platform.o
diff --git a/drivers/mtd/spi-nor/aspeed-smc.c b/drivers/mtd/spi-nor/aspeed-smc.c
new file mode 100644 (file)
index 0000000..56051d3
--- /dev/null
@@ -0,0 +1,754 @@
+/*
+ * ASPEED Static Memory Controller driver
+ *
+ * Copyright (c) 2015-2016, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/sysfs.h>
+
+#define DEVICE_NAME    "aspeed-smc"
+
+/*
+ * The driver only supports SPI flash
+ */
+enum aspeed_smc_flash_type {
+       smc_type_nor  = 0,
+       smc_type_nand = 1,
+       smc_type_spi  = 2,
+};
+
+struct aspeed_smc_chip;
+
+struct aspeed_smc_info {
+       u32 maxsize;            /* maximum size of chip window */
+       u8 nce;                 /* number of chip enables */
+       bool hastype;           /* flash type field exists in config reg */
+       u8 we0;                 /* shift for write enable bit for CE0 */
+       u8 ctl0;                /* offset in regs of ctl for CE0 */
+
+       void (*set_4b)(struct aspeed_smc_chip *chip);
+};
+
+static void aspeed_smc_chip_set_4b_spi_2400(struct aspeed_smc_chip *chip);
+static void aspeed_smc_chip_set_4b(struct aspeed_smc_chip *chip);
+
+static const struct aspeed_smc_info fmc_2400_info = {
+       .maxsize = 64 * 1024 * 1024,
+       .nce = 5,
+       .hastype = true,
+       .we0 = 16,
+       .ctl0 = 0x10,
+       .set_4b = aspeed_smc_chip_set_4b,
+};
+
+static const struct aspeed_smc_info spi_2400_info = {
+       .maxsize = 64 * 1024 * 1024,
+       .nce = 1,
+       .hastype = false,
+       .we0 = 0,
+       .ctl0 = 0x04,
+       .set_4b = aspeed_smc_chip_set_4b_spi_2400,
+};
+
+static const struct aspeed_smc_info fmc_2500_info = {
+       .maxsize = 256 * 1024 * 1024,
+       .nce = 3,
+       .hastype = true,
+       .we0 = 16,
+       .ctl0 = 0x10,
+       .set_4b = aspeed_smc_chip_set_4b,
+};
+
+static const struct aspeed_smc_info spi_2500_info = {
+       .maxsize = 128 * 1024 * 1024,
+       .nce = 2,
+       .hastype = false,
+       .we0 = 16,
+       .ctl0 = 0x10,
+       .set_4b = aspeed_smc_chip_set_4b,
+};
+
+enum aspeed_smc_ctl_reg_value {
+       smc_base,               /* base value without mode for other commands */
+       smc_read,               /* command reg for (maybe fast) reads */
+       smc_write,              /* command reg for writes */
+       smc_max,
+};
+
+struct aspeed_smc_controller;
+
+struct aspeed_smc_chip {
+       int cs;
+       struct aspeed_smc_controller *controller;
+       void __iomem *ctl;                      /* control register */
+       void __iomem *ahb_base;                 /* base of chip window */
+       u32 ctl_val[smc_max];                   /* control settings */
+       enum aspeed_smc_flash_type type;        /* what type of flash */
+       struct spi_nor nor;
+};
+
+struct aspeed_smc_controller {
+       struct device *dev;
+
+       struct mutex mutex;                     /* controller access mutex */
+       const struct aspeed_smc_info *info;     /* type info of controller */
+       void __iomem *regs;                     /* controller registers */
+       void __iomem *ahb_base;                 /* per-chip windows resource */
+
+       struct aspeed_smc_chip *chips[0];       /* pointers to attached chips */
+};
+
+/*
+ * SPI Flash Configuration Register (AST2500 SPI)
+ *     or
+ * Type setting Register (AST2500 FMC).
+ * CE0 and CE1 can only be of type SPI. CE2 can be of type NOR but the
+ * driver does not support it.
+ */
+#define CONFIG_REG                     0x0
+#define CONFIG_DISABLE_LEGACY          BIT(31) /* 1 */
+
+#define CONFIG_CE2_WRITE               BIT(18)
+#define CONFIG_CE1_WRITE               BIT(17)
+#define CONFIG_CE0_WRITE               BIT(16)
+
+#define CONFIG_CE2_TYPE                        BIT(4) /* AST2500 FMC only */
+#define CONFIG_CE1_TYPE                        BIT(2) /* AST2500 FMC only */
+#define CONFIG_CE0_TYPE                        BIT(0) /* AST2500 FMC only */
+
+/*
+ * CE Control Register
+ */
+#define CE_CONTROL_REG                 0x4
+
+/*
+ * CEx Control Register
+ */
+#define CONTROL_AAF_MODE               BIT(31)
+#define CONTROL_IO_MODE_MASK           GENMASK(30, 28)
+#define CONTROL_IO_DUAL_DATA           BIT(29)
+#define CONTROL_IO_DUAL_ADDR_DATA      (BIT(29) | BIT(28))
+#define CONTROL_IO_QUAD_DATA           BIT(30)
+#define CONTROL_IO_QUAD_ADDR_DATA      (BIT(30) | BIT(28))
+#define CONTROL_CE_INACTIVE_SHIFT      24
+#define CONTROL_CE_INACTIVE_MASK       GENMASK(27, \
+                                       CONTROL_CE_INACTIVE_SHIFT)
+/* 0 = 16T ... 15 = 1T   T=HCLK */
+#define CONTROL_COMMAND_SHIFT          16
+#define CONTROL_DUMMY_COMMAND_OUT      BIT(15)
+#define CONTROL_IO_DUMMY_HI            BIT(14)
+#define CONTROL_IO_DUMMY_HI_SHIFT      14
+#define CONTROL_CLK_DIV4               BIT(13) /* others */
+#define CONTROL_IO_ADDRESS_4B          BIT(13) /* AST2400 SPI */
+#define CONTROL_RW_MERGE               BIT(12)
+#define CONTROL_IO_DUMMY_LO_SHIFT      6
+#define CONTROL_IO_DUMMY_LO            GENMASK(7, \
+                                               CONTROL_IO_DUMMY_LO_SHIFT)
+#define CONTROL_IO_DUMMY_MASK          (CONTROL_IO_DUMMY_HI | \
+                                        CONTROL_IO_DUMMY_LO)
+#define CONTROL_IO_DUMMY_SET(dummy)                             \
+       (((((dummy) >> 2) & 0x1) << CONTROL_IO_DUMMY_HI_SHIFT) | \
+        (((dummy) & 0x3) << CONTROL_IO_DUMMY_LO_SHIFT))
+
+#define CONTROL_CLOCK_FREQ_SEL_SHIFT   8
+#define CONTROL_CLOCK_FREQ_SEL_MASK    GENMASK(11, \
+                                               CONTROL_CLOCK_FREQ_SEL_SHIFT)
+#define CONTROL_LSB_FIRST              BIT(5)
+#define CONTROL_CLOCK_MODE_3           BIT(4)
+#define CONTROL_IN_DUAL_DATA           BIT(3)
+#define CONTROL_CE_STOP_ACTIVE_CONTROL BIT(2)
+#define CONTROL_COMMAND_MODE_MASK      GENMASK(1, 0)
+#define CONTROL_COMMAND_MODE_NORMAL    0
+#define CONTROL_COMMAND_MODE_FREAD     1
+#define CONTROL_COMMAND_MODE_WRITE     2
+#define CONTROL_COMMAND_MODE_USER      3
+
+#define CONTROL_KEEP_MASK                                              \
+       (CONTROL_AAF_MODE | CONTROL_CE_INACTIVE_MASK | CONTROL_CLK_DIV4 | \
+        CONTROL_IO_DUMMY_MASK | CONTROL_CLOCK_FREQ_SEL_MASK |          \
+        CONTROL_LSB_FIRST | CONTROL_CLOCK_MODE_3)
+
+/*
+ * The Segment Register uses a 8MB unit to encode the start address
+ * and the end address of the mapping window of a flash SPI slave :
+ *
+ *        | byte 1 | byte 2 | byte 3 | byte 4 |
+ *        +--------+--------+--------+--------+
+ *        |  end   |  start |   0    |   0    |
+ */
+#define SEGMENT_ADDR_REG0              0x30
+#define SEGMENT_ADDR_START(_r)         ((((_r) >> 16) & 0xFF) << 23)
+#define SEGMENT_ADDR_END(_r)           ((((_r) >> 24) & 0xFF) << 23)
+
+/*
+ * In user mode all data bytes read or written to the chip decode address
+ * range are transferred to or from the SPI bus. The range is treated as a
+ * fifo of arbitrary 1, 2, or 4 byte width but each write has to be aligned
+ * to its size. The address within the multiple 8kB range is ignored when
+ * sending bytes to the SPI bus.
+ *
+ * On the arm architecture, as of Linux version 4.3, memcpy_fromio and
+ * memcpy_toio on little endian targets use the optimized memcpy routines
+ * that were designed for well-behaved memory storage. These routines
+ * have a stutter if the source and destination are not both word aligned,
+ * once with a duplicate access to the source after aligning to the
+ * destination to a word boundary, and again with a duplicate access to
+ * the source when the final byte count is not word aligned.
+ *
+ * When writing or reading the fifo this stutter discards data or sends
+ * too much data to the fifo and can not be used by this driver.
+ *
+ * While the low level io string routines that implement the insl family do
+ * the desired accesses and memory increments, the cross architecture io
+ * macros make them essentially impossible to use on a memory mapped address
+ * instead of a token from the call to iomap of an io port.
+ *
+ * These fifo routines use readl and friends to a constant io port and update
+ * the memory buffer pointer and count via explicit code. The final updates
+ * to len are optimistically suppressed.
+ */
+static int aspeed_smc_read_from_ahb(void *buf, void __iomem *src, size_t len)
+{
+       size_t offset = 0;
+
+       if (IS_ALIGNED((uintptr_t)src, sizeof(uintptr_t)) &&
+           IS_ALIGNED((uintptr_t)buf, sizeof(uintptr_t))) {
+               ioread32_rep(src, buf, len >> 2);
+               offset = len & ~0x3;
+               len -= offset;
+       }
+       ioread8_rep(src, (u8 *)buf + offset, len);
+       return 0;
+}
+
+static int aspeed_smc_write_to_ahb(void __iomem *dst, const void *buf,
+                                  size_t len)
+{
+       size_t offset = 0;
+
+       if (IS_ALIGNED((uintptr_t)dst, sizeof(uintptr_t)) &&
+           IS_ALIGNED((uintptr_t)buf, sizeof(uintptr_t))) {
+               iowrite32_rep(dst, buf, len >> 2);
+               offset = len & ~0x3;
+               len -= offset;
+       }
+       iowrite8_rep(dst, (const u8 *)buf + offset, len);
+       return 0;
+}
+
+static inline u32 aspeed_smc_chip_write_bit(struct aspeed_smc_chip *chip)
+{
+       return BIT(chip->controller->info->we0 + chip->cs);
+}
+
+static void aspeed_smc_chip_check_config(struct aspeed_smc_chip *chip)
+{
+       struct aspeed_smc_controller *controller = chip->controller;
+       u32 reg;
+
+       reg = readl(controller->regs + CONFIG_REG);
+
+       if (reg & aspeed_smc_chip_write_bit(chip))
+               return;
+
+       dev_dbg(controller->dev, "config write is not set ! @%p: 0x%08x\n",
+               controller->regs + CONFIG_REG, reg);
+       reg |= aspeed_smc_chip_write_bit(chip);
+       writel(reg, controller->regs + CONFIG_REG);
+}
+
+static void aspeed_smc_start_user(struct spi_nor *nor)
+{
+       struct aspeed_smc_chip *chip = nor->priv;
+       u32 ctl = chip->ctl_val[smc_base];
+
+       /*
+        * When the chip is controlled in user mode, we need write
+        * access to send the opcodes to it. So check the config.
+        */
+       aspeed_smc_chip_check_config(chip);
+
+       ctl |= CONTROL_COMMAND_MODE_USER |
+               CONTROL_CE_STOP_ACTIVE_CONTROL;
+       writel(ctl, chip->ctl);
+
+       ctl &= ~CONTROL_CE_STOP_ACTIVE_CONTROL;
+       writel(ctl, chip->ctl);
+}
+
+static void aspeed_smc_stop_user(struct spi_nor *nor)
+{
+       struct aspeed_smc_chip *chip = nor->priv;
+
+       u32 ctl = chip->ctl_val[smc_read];
+       u32 ctl2 = ctl | CONTROL_COMMAND_MODE_USER |
+               CONTROL_CE_STOP_ACTIVE_CONTROL;
+
+       writel(ctl2, chip->ctl);        /* stop user CE control */
+       writel(ctl, chip->ctl);         /* default to fread or read mode */
+}
+
+static int aspeed_smc_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+       struct aspeed_smc_chip *chip = nor->priv;
+
+       mutex_lock(&chip->controller->mutex);
+       return 0;
+}
+
+static void aspeed_smc_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+       struct aspeed_smc_chip *chip = nor->priv;
+
+       mutex_unlock(&chip->controller->mutex);
+}
+
+static int aspeed_smc_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+       struct aspeed_smc_chip *chip = nor->priv;
+
+       aspeed_smc_start_user(nor);
+       aspeed_smc_write_to_ahb(chip->ahb_base, &opcode, 1);
+       aspeed_smc_read_from_ahb(buf, chip->ahb_base, len);
+       aspeed_smc_stop_user(nor);
+       return 0;
+}
+
+static int aspeed_smc_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
+                               int len)
+{
+       struct aspeed_smc_chip *chip = nor->priv;
+
+       aspeed_smc_start_user(nor);
+       aspeed_smc_write_to_ahb(chip->ahb_base, &opcode, 1);
+       aspeed_smc_write_to_ahb(chip->ahb_base, buf, len);
+       aspeed_smc_stop_user(nor);
+       return 0;
+}
+
+static void aspeed_smc_send_cmd_addr(struct spi_nor *nor, u8 cmd, u32 addr)
+{
+       struct aspeed_smc_chip *chip = nor->priv;
+       __be32 temp;
+       u32 cmdaddr;
+
+       switch (nor->addr_width) {
+       default:
+               WARN_ONCE(1, "Unexpected address width %u, defaulting to 3\n",
+                         nor->addr_width);
+               /* FALLTHROUGH */
+       case 3:
+               cmdaddr = addr & 0xFFFFFF;
+               cmdaddr |= cmd << 24;
+
+               temp = cpu_to_be32(cmdaddr);
+               aspeed_smc_write_to_ahb(chip->ahb_base, &temp, 4);
+               break;
+       case 4:
+               temp = cpu_to_be32(addr);
+               aspeed_smc_write_to_ahb(chip->ahb_base, &cmd, 1);
+               aspeed_smc_write_to_ahb(chip->ahb_base, &temp, 4);
+               break;
+       }
+}
+
+static ssize_t aspeed_smc_read_user(struct spi_nor *nor, loff_t from,
+                                   size_t len, u_char *read_buf)
+{
+       struct aspeed_smc_chip *chip = nor->priv;
+       int i;
+       u8 dummy = 0xFF;
+
+       aspeed_smc_start_user(nor);
+       aspeed_smc_send_cmd_addr(nor, nor->read_opcode, from);
+       for (i = 0; i < chip->nor.read_dummy / 8; i++)
+               aspeed_smc_write_to_ahb(chip->ahb_base, &dummy, sizeof(dummy));
+
+       aspeed_smc_read_from_ahb(read_buf, chip->ahb_base, len);
+       aspeed_smc_stop_user(nor);
+       return len;
+}
+
+static ssize_t aspeed_smc_write_user(struct spi_nor *nor, loff_t to,
+                                    size_t len, const u_char *write_buf)
+{
+       struct aspeed_smc_chip *chip = nor->priv;
+
+       aspeed_smc_start_user(nor);
+       aspeed_smc_send_cmd_addr(nor, nor->program_opcode, to);
+       aspeed_smc_write_to_ahb(chip->ahb_base, write_buf, len);
+       aspeed_smc_stop_user(nor);
+       return len;
+}
+
+static int aspeed_smc_unregister(struct aspeed_smc_controller *controller)
+{
+       struct aspeed_smc_chip *chip;
+       int n;
+
+       for (n = 0; n < controller->info->nce; n++) {
+               chip = controller->chips[n];
+               if (chip)
+                       mtd_device_unregister(&chip->nor.mtd);
+       }
+
+       return 0;
+}
+
+static int aspeed_smc_remove(struct platform_device *dev)
+{
+       return aspeed_smc_unregister(platform_get_drvdata(dev));
+}
+
+static const struct of_device_id aspeed_smc_matches[] = {
+       { .compatible = "aspeed,ast2400-fmc", .data = &fmc_2400_info },
+       { .compatible = "aspeed,ast2400-spi", .data = &spi_2400_info },
+       { .compatible = "aspeed,ast2500-fmc", .data = &fmc_2500_info },
+       { .compatible = "aspeed,ast2500-spi", .data = &spi_2500_info },
+       { }
+};
+MODULE_DEVICE_TABLE(of, aspeed_smc_matches);
+
+/*
+ * Each chip has a mapping window defined by a segment address
+ * register defining a start and an end address on the AHB bus. These
+ * addresses can be configured to fit the chip size and offer a
+ * contiguous memory region across chips. For the moment, we only
+ * check that each chip segment is valid.
+ */
+static void __iomem *aspeed_smc_chip_base(struct aspeed_smc_chip *chip,
+                                         struct resource *res)
+{
+       struct aspeed_smc_controller *controller = chip->controller;
+       u32 offset = 0;
+       u32 reg;
+
+       if (controller->info->nce > 1) {
+               reg = readl(controller->regs + SEGMENT_ADDR_REG0 +
+                           chip->cs * 4);
+
+               if (SEGMENT_ADDR_START(reg) >= SEGMENT_ADDR_END(reg))
+                       return NULL;
+
+               offset = SEGMENT_ADDR_START(reg) - res->start;
+       }
+
+       return controller->ahb_base + offset;
+}
+
+static void aspeed_smc_chip_enable_write(struct aspeed_smc_chip *chip)
+{
+       struct aspeed_smc_controller *controller = chip->controller;
+       u32 reg;
+
+       reg = readl(controller->regs + CONFIG_REG);
+
+       reg |= aspeed_smc_chip_write_bit(chip);
+       writel(reg, controller->regs + CONFIG_REG);
+}
+
+static void aspeed_smc_chip_set_type(struct aspeed_smc_chip *chip, int type)
+{
+       struct aspeed_smc_controller *controller = chip->controller;
+       u32 reg;
+
+       chip->type = type;
+
+       reg = readl(controller->regs + CONFIG_REG);
+       reg &= ~(3 << (chip->cs * 2));
+       reg |= chip->type << (chip->cs * 2);
+       writel(reg, controller->regs + CONFIG_REG);
+}
+
+/*
+ * The AST2500 FMC flash controller should be strapped by hardware, or
+ * autodetected, but the AST2500 SPI flash needs to be set.
+ */
+static void aspeed_smc_chip_set_4b(struct aspeed_smc_chip *chip)
+{
+       struct aspeed_smc_controller *controller = chip->controller;
+       u32 reg;
+
+       if (chip->controller->info == &spi_2500_info) {
+               reg = readl(controller->regs + CE_CONTROL_REG);
+               reg |= 1 << chip->cs;
+               writel(reg, controller->regs + CE_CONTROL_REG);
+       }
+}
+
+/*
+ * The AST2400 SPI flash controller does not have a CE Control
+ * register. It uses the CE0 control register to set 4Byte mode at the
+ * controller level.
+ */
+static void aspeed_smc_chip_set_4b_spi_2400(struct aspeed_smc_chip *chip)
+{
+       chip->ctl_val[smc_base] |= CONTROL_IO_ADDRESS_4B;
+       chip->ctl_val[smc_read] |= CONTROL_IO_ADDRESS_4B;
+}
+
+static int aspeed_smc_chip_setup_init(struct aspeed_smc_chip *chip,
+                                     struct resource *res)
+{
+       struct aspeed_smc_controller *controller = chip->controller;
+       const struct aspeed_smc_info *info = controller->info;
+       u32 reg, base_reg;
+
+       /*
+        * Always turn on the write enable bit to allow opcodes to be
+        * sent in user mode.
+        */
+       aspeed_smc_chip_enable_write(chip);
+
+       /* The driver only supports SPI type flash */
+       if (info->hastype)
+               aspeed_smc_chip_set_type(chip, smc_type_spi);
+
+       /*
+        * Configure chip base address in memory
+        */
+       chip->ahb_base = aspeed_smc_chip_base(chip, res);
+       if (!chip->ahb_base) {
+               dev_warn(chip->nor.dev, "CE segment window closed.\n");
+               return -EINVAL;
+       }
+
+       /*
+        * Get value of the inherited control register. U-Boot usually
+        * does some timing calibration on the FMC chip, so it's good
+        * to keep them. In the future, we should handle calibration
+        * from Linux.
+        */
+       reg = readl(chip->ctl);
+       dev_dbg(controller->dev, "control register: %08x\n", reg);
+
+       base_reg = reg & CONTROL_KEEP_MASK;
+       if (base_reg != reg) {
+               dev_dbg(controller->dev,
+                       "control register changed to: %08x\n",
+                       base_reg);
+       }
+       chip->ctl_val[smc_base] = base_reg;
+
+       /*
+        * Retain the prior value of the control register as the
+        * default if it was normal access mode. Otherwise start with
+        * the sanitized base value set to read mode.
+        */
+       if ((reg & CONTROL_COMMAND_MODE_MASK) ==
+           CONTROL_COMMAND_MODE_NORMAL)
+               chip->ctl_val[smc_read] = reg;
+       else
+               chip->ctl_val[smc_read] = chip->ctl_val[smc_base] |
+                       CONTROL_COMMAND_MODE_NORMAL;
+
+       dev_dbg(controller->dev, "default control register: %08x\n",
+               chip->ctl_val[smc_read]);
+       return 0;
+}
+
+static int aspeed_smc_chip_setup_finish(struct aspeed_smc_chip *chip)
+{
+       struct aspeed_smc_controller *controller = chip->controller;
+       const struct aspeed_smc_info *info = controller->info;
+       u32 cmd;
+
+       if (chip->nor.addr_width == 4 && info->set_4b)
+               info->set_4b(chip);
+
+       /*
+        * base mode has not been optimized yet. use it for writes.
+        */
+       chip->ctl_val[smc_write] = chip->ctl_val[smc_base] |
+               chip->nor.program_opcode << CONTROL_COMMAND_SHIFT |
+               CONTROL_COMMAND_MODE_WRITE;
+
+       dev_dbg(controller->dev, "write control register: %08x\n",
+               chip->ctl_val[smc_write]);
+
+       /*
+        * TODO: Adjust clocks if fast read is supported and interpret
+        * SPI-NOR flags to adjust controller settings.
+        */
+       switch (chip->nor.flash_read) {
+       case SPI_NOR_NORMAL:
+               cmd = CONTROL_COMMAND_MODE_NORMAL;
+               break;
+       case SPI_NOR_FAST:
+               cmd = CONTROL_COMMAND_MODE_FREAD;
+               break;
+       default:
+               dev_err(chip->nor.dev, "unsupported SPI read mode\n");
+               return -EINVAL;
+       }
+
+       chip->ctl_val[smc_read] |= cmd |
+               CONTROL_IO_DUMMY_SET(chip->nor.read_dummy / 8);
+
+       dev_dbg(controller->dev, "base control register: %08x\n",
+               chip->ctl_val[smc_read]);
+       return 0;
+}
+
+static int aspeed_smc_setup_flash(struct aspeed_smc_controller *controller,
+                                 struct device_node *np, struct resource *r)
+{
+       const struct aspeed_smc_info *info = controller->info;
+       struct device *dev = controller->dev;
+       struct device_node *child;
+       unsigned int cs;
+       int ret = -ENODEV;
+
+       for_each_available_child_of_node(np, child) {
+               struct aspeed_smc_chip *chip;
+               struct spi_nor *nor;
+               struct mtd_info *mtd;
+
+               /* This driver does not support NAND or NOR flash devices. */
+               if (!of_device_is_compatible(child, "jedec,spi-nor"))
+                       continue;
+
+               ret = of_property_read_u32(child, "reg", &cs);
+               if (ret) {
+                       dev_err(dev, "Couldn't not read chip select.\n");
+                       break;
+               }
+
+               if (cs >= info->nce) {
+                       dev_err(dev, "Chip select %d out of range.\n",
+                               cs);
+                       ret = -ERANGE;
+                       break;
+               }
+
+               if (controller->chips[cs]) {
+                       dev_err(dev, "Chip select %d already in use by %s\n",
+                               cs, dev_name(controller->chips[cs]->nor.dev));
+                       ret = -EBUSY;
+                       break;
+               }
+
+               chip = devm_kzalloc(controller->dev, sizeof(*chip), GFP_KERNEL);
+               if (!chip) {
+                       ret = -ENOMEM;
+                       break;
+               }
+
+               chip->controller = controller;
+               chip->ctl = controller->regs + info->ctl0 + cs * 4;
+               chip->cs = cs;
+
+               nor = &chip->nor;
+               mtd = &nor->mtd;
+
+               nor->dev = dev;
+               nor->priv = chip;
+               spi_nor_set_flash_node(nor, child);
+               nor->read = aspeed_smc_read_user;
+               nor->write = aspeed_smc_write_user;
+               nor->read_reg = aspeed_smc_read_reg;
+               nor->write_reg = aspeed_smc_write_reg;
+               nor->prepare = aspeed_smc_prep;
+               nor->unprepare = aspeed_smc_unprep;
+
+               ret = aspeed_smc_chip_setup_init(chip, r);
+               if (ret)
+                       break;
+
+               /*
+                * TODO: Add support for SPI_NOR_QUAD and SPI_NOR_DUAL
+                * attach when board support is present as determined
+                * by of property.
+                */
+               ret = spi_nor_scan(nor, NULL, SPI_NOR_NORMAL);
+               if (ret)
+                       break;
+
+               ret = aspeed_smc_chip_setup_finish(chip);
+               if (ret)
+                       break;
+
+               ret = mtd_device_register(mtd, NULL, 0);
+               if (ret)
+                       break;
+
+               controller->chips[cs] = chip;
+       }
+
+       if (ret)
+               aspeed_smc_unregister(controller);
+
+       return ret;
+}
+
+static int aspeed_smc_probe(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct device *dev = &pdev->dev;
+       struct aspeed_smc_controller *controller;
+       const struct of_device_id *match;
+       const struct aspeed_smc_info *info;
+       struct resource *res;
+       int ret;
+
+       match = of_match_device(aspeed_smc_matches, &pdev->dev);
+       if (!match || !match->data)
+               return -ENODEV;
+       info = match->data;
+
+       controller = devm_kzalloc(&pdev->dev, sizeof(*controller) +
+               info->nce * sizeof(controller->chips[0]), GFP_KERNEL);
+       if (!controller)
+               return -ENOMEM;
+       controller->info = info;
+       controller->dev = dev;
+
+       mutex_init(&controller->mutex);
+       platform_set_drvdata(pdev, controller);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       controller->regs = devm_ioremap_resource(dev, res);
+       if (IS_ERR(controller->regs))
+               return PTR_ERR(controller->regs);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       controller->ahb_base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(controller->ahb_base))
+               return PTR_ERR(controller->ahb_base);
+
+       ret = aspeed_smc_setup_flash(controller, np, res);
+       if (ret)
+               dev_err(dev, "Aspeed SMC probe failed %d\n", ret);
+
+       return ret;
+}
+
+static struct platform_driver aspeed_smc_driver = {
+       .probe = aspeed_smc_probe,
+       .remove = aspeed_smc_remove,
+       .driver = {
+               .name = DEVICE_NAME,
+               .of_match_table = aspeed_smc_matches,
+       }
+};
+
+module_platform_driver(aspeed_smc_driver);
+
+MODULE_DESCRIPTION("ASPEED Static Memory Controller Driver");
+MODULE_AUTHOR("Cedric Le Goater <clg@kaod.org>");
+MODULE_LICENSE("GPL v2");
index d489fbd07c12b6e73bf0f73e5f3de2696d4da3c4..9f8102de1b16a1786c5fb06f7980acc872433dd5 100644 (file)
@@ -526,7 +526,8 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor,
                        bytes_to_read *= cqspi->fifo_width;
                        bytes_to_read = bytes_to_read > remaining ?
                                        remaining : bytes_to_read;
-                       readsl(ahb_base, rxbuf, DIV_ROUND_UP(bytes_to_read, 4));
+                       ioread32_rep(ahb_base, rxbuf,
+                                    DIV_ROUND_UP(bytes_to_read, 4));
                        rxbuf += bytes_to_read;
                        remaining -= bytes_to_read;
                        bytes_to_read = cqspi_get_rd_sram_level(cqspi);
@@ -610,7 +611,8 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor,
 
        while (remaining > 0) {
                write_bytes = remaining > page_size ? page_size : remaining;
-               writesl(cqspi->ahb_base, txbuf, DIV_ROUND_UP(write_bytes, 4));
+               iowrite32_rep(cqspi->ahb_base, txbuf,
+                             DIV_ROUND_UP(write_bytes, 4));
 
                ret = wait_for_completion_timeout(&cqspi->transfer_complete,
                                                  msecs_to_jiffies
@@ -891,7 +893,7 @@ static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
        if (ret)
                return ret;
 
-       return (ret < 0) ? ret : len;
+       return len;
 }
 
 static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
@@ -911,7 +913,7 @@ static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
        if (ret)
                return ret;
 
-       return (ret < 0) ? ret : len;
+       return len;
 }
 
 static int cqspi_erase(struct spi_nor *nor, loff_t offs)
index b4d8953fb30a4cc7efe59e62e2eadff3d60a2c9e..1476135e0d50176312c4534de0b1386296e79405 100644 (file)
 #define QUADSPI_LUT_NUM                64
 
 /* SEQID -- we can have 16 seqids at most. */
-#define SEQID_QUAD_READ                0
+#define SEQID_READ             0
 #define SEQID_WREN             1
 #define SEQID_WRDI             2
 #define SEQID_RDSR             3
@@ -373,32 +373,26 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
        void __iomem *base = q->iobase;
        int rxfifo = q->devtype_data->rxfifo;
        u32 lut_base;
-       u8 cmd, addrlen, dummy;
        int i;
 
+       struct spi_nor *nor = &q->nor[0];
+       u8 addrlen = (nor->addr_width == 3) ? ADDR24BIT : ADDR32BIT;
+       u8 read_op = nor->read_opcode;
+       u8 read_dm = nor->read_dummy;
+
        fsl_qspi_unlock_lut(q);
 
        /* Clear all the LUT table */
        for (i = 0; i < QUADSPI_LUT_NUM; i++)
                qspi_writel(q, 0, base + QUADSPI_LUT_BASE + i * 4);
 
-       /* Quad Read */
-       lut_base = SEQID_QUAD_READ * 4;
-
-       if (q->nor_size <= SZ_16M) {
-               cmd = SPINOR_OP_READ_1_1_4;
-               addrlen = ADDR24BIT;
-               dummy = 8;
-       } else {
-               /* use the 4-byte address */
-               cmd = SPINOR_OP_READ_1_1_4;
-               addrlen = ADDR32BIT;
-               dummy = 8;
-       }
+       /* Read */
+       lut_base = SEQID_READ * 4;
 
-       qspi_writel(q, LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
+       qspi_writel(q, LUT0(CMD, PAD1, read_op) | LUT1(ADDR, PAD1, addrlen),
                        base + QUADSPI_LUT(lut_base));
-       qspi_writel(q, LUT0(DUMMY, PAD1, dummy) | LUT1(FSL_READ, PAD4, rxfifo),
+       qspi_writel(q, LUT0(DUMMY, PAD1, read_dm) |
+                   LUT1(FSL_READ, PAD4, rxfifo),
                        base + QUADSPI_LUT(lut_base + 1));
 
        /* Write enable */
@@ -409,16 +403,8 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
        /* Page Program */
        lut_base = SEQID_PP * 4;
 
-       if (q->nor_size <= SZ_16M) {
-               cmd = SPINOR_OP_PP;
-               addrlen = ADDR24BIT;
-       } else {
-               /* use the 4-byte address */
-               cmd = SPINOR_OP_PP;
-               addrlen = ADDR32BIT;
-       }
-
-       qspi_writel(q, LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
+       qspi_writel(q, LUT0(CMD, PAD1, nor->program_opcode) |
+                   LUT1(ADDR, PAD1, addrlen),
                        base + QUADSPI_LUT(lut_base));
        qspi_writel(q, LUT0(FSL_WRITE, PAD1, 0),
                        base + QUADSPI_LUT(lut_base + 1));
@@ -432,10 +418,8 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
        /* Erase a sector */
        lut_base = SEQID_SE * 4;
 
-       cmd = q->nor[0].erase_opcode;
-       addrlen = q->nor_size <= SZ_16M ? ADDR24BIT : ADDR32BIT;
-
-       qspi_writel(q, LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
+       qspi_writel(q, LUT0(CMD, PAD1, nor->erase_opcode) |
+                   LUT1(ADDR, PAD1, addrlen),
                        base + QUADSPI_LUT(lut_base));
 
        /* Erase the whole chip */
@@ -484,7 +468,7 @@ static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd)
 {
        switch (cmd) {
        case SPINOR_OP_READ_1_1_4:
-               return SEQID_QUAD_READ;
+               return SEQID_READ;
        case SPINOR_OP_WREN:
                return SEQID_WREN;
        case SPINOR_OP_WRDI:
diff --git a/drivers/mtd/spi-nor/intel-spi-platform.c b/drivers/mtd/spi-nor/intel-spi-platform.c
new file mode 100644 (file)
index 0000000..5c943df
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * Intel PCH/PCU SPI flash platform driver.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "intel-spi.h"
+
+static int intel_spi_platform_probe(struct platform_device *pdev)
+{
+       struct intel_spi_boardinfo *info;
+       struct intel_spi *ispi;
+       struct resource *mem;
+
+       info = dev_get_platdata(&pdev->dev);
+       if (!info)
+               return -EINVAL;
+
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       ispi = intel_spi_probe(&pdev->dev, mem, info);
+       if (IS_ERR(ispi))
+               return PTR_ERR(ispi);
+
+       platform_set_drvdata(pdev, ispi);
+       return 0;
+}
+
+static int intel_spi_platform_remove(struct platform_device *pdev)
+{
+       struct intel_spi *ispi = platform_get_drvdata(pdev);
+
+       return intel_spi_remove(ispi);
+}
+
+static struct platform_driver intel_spi_platform_driver = {
+       .probe = intel_spi_platform_probe,
+       .remove = intel_spi_platform_remove,
+       .driver = {
+               .name = "intel-spi",
+       },
+};
+
+module_platform_driver(intel_spi_platform_driver);
+
+MODULE_DESCRIPTION("Intel PCH/PCU SPI flash platform driver");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:intel-spi");
diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
new file mode 100644 (file)
index 0000000..a10f602
--- /dev/null
@@ -0,0 +1,777 @@
+/*
+ * Intel PCH/PCU SPI flash driver.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/sizes.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/platform_data/intel-spi.h>
+
+#include "intel-spi.h"
+
+/* Offsets are from @ispi->base */
+#define BFPREG                         0x00
+
+#define HSFSTS_CTL                     0x04
+#define HSFSTS_CTL_FSMIE               BIT(31)
+#define HSFSTS_CTL_FDBC_SHIFT          24
+#define HSFSTS_CTL_FDBC_MASK           (0x3f << HSFSTS_CTL_FDBC_SHIFT)
+
+#define HSFSTS_CTL_FCYCLE_SHIFT                17
+#define HSFSTS_CTL_FCYCLE_MASK         (0x0f << HSFSTS_CTL_FCYCLE_SHIFT)
+/* HW sequencer opcodes */
+#define HSFSTS_CTL_FCYCLE_READ         (0x00 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_WRITE                (0x02 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_ERASE                (0x03 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_ERASE_64K    (0x04 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_RDID         (0x06 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_WRSR         (0x07 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_RDSR         (0x08 << HSFSTS_CTL_FCYCLE_SHIFT)
+
+#define HSFSTS_CTL_FGO                 BIT(16)
+#define HSFSTS_CTL_FLOCKDN             BIT(15)
+#define HSFSTS_CTL_FDV                 BIT(14)
+#define HSFSTS_CTL_SCIP                        BIT(5)
+#define HSFSTS_CTL_AEL                 BIT(2)
+#define HSFSTS_CTL_FCERR               BIT(1)
+#define HSFSTS_CTL_FDONE               BIT(0)
+
+#define FADDR                          0x08
+#define DLOCK                          0x0c
+#define FDATA(n)                       (0x10 + ((n) * 4))
+
+#define FRACC                          0x50
+
+#define FREG(n)                                (0x54 + ((n) * 4))
+#define FREG_BASE_MASK                 0x3fff
+#define FREG_LIMIT_SHIFT               16
+#define FREG_LIMIT_MASK                        (0x03fff << FREG_LIMIT_SHIFT)
+
+/* Offset is from @ispi->pregs */
+#define PR(n)                          ((n) * 4)
+#define PR_WPE                         BIT(31)
+#define PR_LIMIT_SHIFT                 16
+#define PR_LIMIT_MASK                  (0x3fff << PR_LIMIT_SHIFT)
+#define PR_RPE                         BIT(15)
+#define PR_BASE_MASK                   0x3fff
+/* Last PR is GPR0 */
+#define PR_NUM                         (5 + 1)
+
+/* Offsets are from @ispi->sregs */
+#define SSFSTS_CTL                     0x00
+#define SSFSTS_CTL_FSMIE               BIT(23)
+#define SSFSTS_CTL_DS                  BIT(22)
+#define SSFSTS_CTL_DBC_SHIFT           16
+#define SSFSTS_CTL_SPOP                        BIT(11)
+#define SSFSTS_CTL_ACS                 BIT(10)
+#define SSFSTS_CTL_SCGO                        BIT(9)
+#define SSFSTS_CTL_COP_SHIFT           12
+#define SSFSTS_CTL_FRS                 BIT(7)
+#define SSFSTS_CTL_DOFRS               BIT(6)
+#define SSFSTS_CTL_AEL                 BIT(4)
+#define SSFSTS_CTL_FCERR               BIT(3)
+#define SSFSTS_CTL_FDONE               BIT(2)
+#define SSFSTS_CTL_SCIP                        BIT(0)
+
+#define PREOP_OPTYPE                   0x04
+#define OPMENU0                                0x08
+#define OPMENU1                                0x0c
+
+/* CPU specifics */
+#define BYT_PR                         0x74
+#define BYT_SSFSTS_CTL                 0x90
+#define BYT_BCR                                0xfc
+#define BYT_BCR_WPD                    BIT(0)
+#define BYT_FREG_NUM                   5
+
+#define LPT_PR                         0x74
+#define LPT_SSFSTS_CTL                 0x90
+#define LPT_FREG_NUM                   5
+
+#define BXT_PR                         0x84
+#define BXT_SSFSTS_CTL                 0xa0
+#define BXT_FREG_NUM                   12
+
+#define INTEL_SPI_TIMEOUT              5000 /* ms */
+#define INTEL_SPI_FIFO_SZ              64
+
+/**
+ * struct intel_spi - Driver private data
+ * @dev: Device pointer
+ * @info: Pointer to board specific info
+ * @nor: SPI NOR layer structure
+ * @base: Beginning of MMIO space
+ * @pregs: Start of protection registers
+ * @sregs: Start of software sequencer registers
+ * @nregions: Maximum number of regions
+ * @writeable: Is the chip writeable
+ * @swseq: Use SW sequencer in register reads/writes
+ * @erase_64k: 64k erase supported
+ * @opcodes: Opcodes which are supported. This are programmed by BIOS
+ *           before it locks down the controller.
+ * @preopcodes: Preopcodes which are supported.
+ */
+struct intel_spi {
+       struct device *dev;
+       const struct intel_spi_boardinfo *info;
+       struct spi_nor nor;
+       void __iomem *base;
+       void __iomem *pregs;
+       void __iomem *sregs;
+       size_t nregions;
+       bool writeable;
+       bool swseq;
+       bool erase_64k;
+       u8 opcodes[8];
+       u8 preopcodes[2];
+};
+
+static bool writeable;
+module_param(writeable, bool, 0);
+MODULE_PARM_DESC(writeable, "Enable write access to SPI flash chip (default=0)");
+
+static void intel_spi_dump_regs(struct intel_spi *ispi)
+{
+       u32 value;
+       int i;
+
+       dev_dbg(ispi->dev, "BFPREG=0x%08x\n", readl(ispi->base + BFPREG));
+
+       value = readl(ispi->base + HSFSTS_CTL);
+       dev_dbg(ispi->dev, "HSFSTS_CTL=0x%08x\n", value);
+       if (value & HSFSTS_CTL_FLOCKDN)
+               dev_dbg(ispi->dev, "-> Locked\n");
+
+       dev_dbg(ispi->dev, "FADDR=0x%08x\n", readl(ispi->base + FADDR));
+       dev_dbg(ispi->dev, "DLOCK=0x%08x\n", readl(ispi->base + DLOCK));
+
+       for (i = 0; i < 16; i++)
+               dev_dbg(ispi->dev, "FDATA(%d)=0x%08x\n",
+                       i, readl(ispi->base + FDATA(i)));
+
+       dev_dbg(ispi->dev, "FRACC=0x%08x\n", readl(ispi->base + FRACC));
+
+       for (i = 0; i < ispi->nregions; i++)
+               dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i,
+                       readl(ispi->base + FREG(i)));
+       for (i = 0; i < PR_NUM; i++)
+               dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i,
+                       readl(ispi->pregs + PR(i)));
+
+       value = readl(ispi->sregs + SSFSTS_CTL);
+       dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value);
+       dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n",
+               readl(ispi->sregs + PREOP_OPTYPE));
+       dev_dbg(ispi->dev, "OPMENU0=0x%08x\n", readl(ispi->sregs + OPMENU0));
+       dev_dbg(ispi->dev, "OPMENU1=0x%08x\n", readl(ispi->sregs + OPMENU1));
+
+       if (ispi->info->type == INTEL_SPI_BYT)
+               dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR));
+
+       dev_dbg(ispi->dev, "Protected regions:\n");
+       for (i = 0; i < PR_NUM; i++) {
+               u32 base, limit;
+
+               value = readl(ispi->pregs + PR(i));
+               if (!(value & (PR_WPE | PR_RPE)))
+                       continue;
+
+               limit = (value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
+               base = value & PR_BASE_MASK;
+
+               dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x [%c%c]\n",
+                        i, base << 12, (limit << 12) | 0xfff,
+                        value & PR_WPE ? 'W' : '.',
+                        value & PR_RPE ? 'R' : '.');
+       }
+
+       dev_dbg(ispi->dev, "Flash regions:\n");
+       for (i = 0; i < ispi->nregions; i++) {
+               u32 region, base, limit;
+
+               region = readl(ispi->base + FREG(i));
+               base = region & FREG_BASE_MASK;
+               limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;
+
+               if (base >= limit || (i > 0 && limit == 0))
+                       dev_dbg(ispi->dev, " %02d disabled\n", i);
+               else
+                       dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x\n",
+                                i, base << 12, (limit << 12) | 0xfff);
+       }
+
+       dev_dbg(ispi->dev, "Using %cW sequencer for register access\n",
+               ispi->swseq ? 'S' : 'H');
+}
+
+/* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */
+static int intel_spi_read_block(struct intel_spi *ispi, void *buf, size_t size)
+{
+       size_t bytes;
+       int i = 0;
+
+       if (size > INTEL_SPI_FIFO_SZ)
+               return -EINVAL;
+
+       while (size > 0) {
+               bytes = min_t(size_t, size, 4);
+               memcpy_fromio(buf, ispi->base + FDATA(i), bytes);
+               size -= bytes;
+               buf += bytes;
+               i++;
+       }
+
+       return 0;
+}
+
+/* Writes max INTEL_SPI_FIFO_SZ bytes to the device fifo */
+static int intel_spi_write_block(struct intel_spi *ispi, const void *buf,
+                                size_t size)
+{
+       size_t bytes;
+       int i = 0;
+
+       if (size > INTEL_SPI_FIFO_SZ)
+               return -EINVAL;
+
+       while (size > 0) {
+               bytes = min_t(size_t, size, 4);
+               memcpy_toio(ispi->base + FDATA(i), buf, bytes);
+               size -= bytes;
+               buf += bytes;
+               i++;
+       }
+
+       return 0;
+}
+
+static int intel_spi_wait_hw_busy(struct intel_spi *ispi)
+{
+       u32 val;
+
+       return readl_poll_timeout(ispi->base + HSFSTS_CTL, val,
+                                 !(val & HSFSTS_CTL_SCIP), 0,
+                                 INTEL_SPI_TIMEOUT * 1000);
+}
+
+static int intel_spi_wait_sw_busy(struct intel_spi *ispi)
+{
+       u32 val;
+
+       return readl_poll_timeout(ispi->sregs + SSFSTS_CTL, val,
+                                 !(val & SSFSTS_CTL_SCIP), 0,
+                                 INTEL_SPI_TIMEOUT * 1000);
+}
+
+static int intel_spi_init(struct intel_spi *ispi)
+{
+       u32 opmenu0, opmenu1, val;
+       int i;
+
+       switch (ispi->info->type) {
+       case INTEL_SPI_BYT:
+               ispi->sregs = ispi->base + BYT_SSFSTS_CTL;
+               ispi->pregs = ispi->base + BYT_PR;
+               ispi->nregions = BYT_FREG_NUM;
+
+               if (writeable) {
+                       /* Disable write protection */
+                       val = readl(ispi->base + BYT_BCR);
+                       if (!(val & BYT_BCR_WPD)) {
+                               val |= BYT_BCR_WPD;
+                               writel(val, ispi->base + BYT_BCR);
+                               val = readl(ispi->base + BYT_BCR);
+                       }
+
+                       ispi->writeable = !!(val & BYT_BCR_WPD);
+               }
+
+               break;
+
+       case INTEL_SPI_LPT:
+               ispi->sregs = ispi->base + LPT_SSFSTS_CTL;
+               ispi->pregs = ispi->base + LPT_PR;
+               ispi->nregions = LPT_FREG_NUM;
+               break;
+
+       case INTEL_SPI_BXT:
+               ispi->sregs = ispi->base + BXT_SSFSTS_CTL;
+               ispi->pregs = ispi->base + BXT_PR;
+               ispi->nregions = BXT_FREG_NUM;
+               ispi->erase_64k = true;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       /* Disable #SMI generation */
+       val = readl(ispi->base + HSFSTS_CTL);
+       val &= ~HSFSTS_CTL_FSMIE;
+       writel(val, ispi->base + HSFSTS_CTL);
+
+       /*
+        * BIOS programs allowed opcodes and then locks down the register.
+        * So read back what opcodes it decided to support. That's the set
+        * we are going to support as well.
+        */
+       opmenu0 = readl(ispi->sregs + OPMENU0);
+       opmenu1 = readl(ispi->sregs + OPMENU1);
+
+       /*
+        * Some controllers can only do basic operations using hardware
+        * sequencer. All other operations are supposed to be carried out
+        * using software sequencer. If we find that BIOS has programmed
+        * opcodes for the software sequencer we use that over the hardware
+        * sequencer.
+        */
+       if (opmenu0 && opmenu1) {
+               for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
+                       ispi->opcodes[i] = opmenu0 >> i * 8;
+                       ispi->opcodes[i + 4] = opmenu1 >> i * 8;
+               }
+
+               val = readl(ispi->sregs + PREOP_OPTYPE);
+               ispi->preopcodes[0] = val;
+               ispi->preopcodes[1] = val >> 8;
+
+               /* Disable #SMI generation from SW sequencer */
+               val = readl(ispi->sregs + SSFSTS_CTL);
+               val &= ~SSFSTS_CTL_FSMIE;
+               writel(val, ispi->sregs + SSFSTS_CTL);
+
+               ispi->swseq = true;
+       }
+
+       intel_spi_dump_regs(ispi);
+
+       return 0;
+}
+
+static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++)
+               if (ispi->opcodes[i] == opcode)
+                       return i;
+       return -EINVAL;
+}
+
+static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
+                             int len)
+{
+       u32 val, status;
+       int ret;
+
+       val = readl(ispi->base + HSFSTS_CTL);
+       val &= ~(HSFSTS_CTL_FCYCLE_MASK | HSFSTS_CTL_FDBC_MASK);
+
+       switch (opcode) {
+       case SPINOR_OP_RDID:
+               val |= HSFSTS_CTL_FCYCLE_RDID;
+               break;
+       case SPINOR_OP_WRSR:
+               val |= HSFSTS_CTL_FCYCLE_WRSR;
+               break;
+       case SPINOR_OP_RDSR:
+               val |= HSFSTS_CTL_FCYCLE_RDSR;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT;
+       val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
+       val |= HSFSTS_CTL_FGO;
+       writel(val, ispi->base + HSFSTS_CTL);
+
+       ret = intel_spi_wait_hw_busy(ispi);
+       if (ret)
+               return ret;
+
+       status = readl(ispi->base + HSFSTS_CTL);
+       if (status & HSFSTS_CTL_FCERR)
+               return -EIO;
+       else if (status & HSFSTS_CTL_AEL)
+               return -EACCES;
+
+       return 0;
+}
+
+static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
+                             int len)
+{
+       u32 val, status;
+       int ret;
+
+       ret = intel_spi_opcode_index(ispi, opcode);
+       if (ret < 0)
+               return ret;
+
+       val = (len << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
+       val |= ret << SSFSTS_CTL_COP_SHIFT;
+       val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE;
+       val |= SSFSTS_CTL_SCGO;
+       writel(val, ispi->sregs + SSFSTS_CTL);
+
+       ret = intel_spi_wait_sw_busy(ispi);
+       if (ret)
+               return ret;
+
+       status = readl(ispi->base + SSFSTS_CTL);
+       if (status & SSFSTS_CTL_FCERR)
+               return -EIO;
+       else if (status & SSFSTS_CTL_AEL)
+               return -EACCES;
+
+       return 0;
+}
+
+static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+       struct intel_spi *ispi = nor->priv;
+       int ret;
+
+       /* Address of the first chip */
+       writel(0, ispi->base + FADDR);
+
+       if (ispi->swseq)
+               ret = intel_spi_sw_cycle(ispi, opcode, buf, len);
+       else
+               ret = intel_spi_hw_cycle(ispi, opcode, buf, len);
+
+       if (ret)
+               return ret;
+
+       return intel_spi_read_block(ispi, buf, len);
+}
+
+static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+       struct intel_spi *ispi = nor->priv;
+       int ret;
+
+       /*
+        * This is handled with atomic operation and preop code in Intel
+        * controller so skip it here now.
+        */
+       if (opcode == SPINOR_OP_WREN)
+               return 0;
+
+       writel(0, ispi->base + FADDR);
+
+       /* Write the value beforehand */
+       ret = intel_spi_write_block(ispi, buf, len);
+       if (ret)
+               return ret;
+
+       if (ispi->swseq)
+               return intel_spi_sw_cycle(ispi, opcode, buf, len);
+       return intel_spi_hw_cycle(ispi, opcode, buf, len);
+}
+
+static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
+                             u_char *read_buf)
+{
+       struct intel_spi *ispi = nor->priv;
+       size_t block_size, retlen = 0;
+       u32 val, status;
+       ssize_t ret;
+
+       switch (nor->read_opcode) {
+       case SPINOR_OP_READ:
+       case SPINOR_OP_READ_FAST:
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       while (len > 0) {
+               block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
+
+               writel(from, ispi->base + FADDR);
+
+               val = readl(ispi->base + HSFSTS_CTL);
+               val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
+               val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
+               val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
+               val |= HSFSTS_CTL_FCYCLE_READ;
+               val |= HSFSTS_CTL_FGO;
+               writel(val, ispi->base + HSFSTS_CTL);
+
+               ret = intel_spi_wait_hw_busy(ispi);
+               if (ret)
+                       return ret;
+
+               status = readl(ispi->base + HSFSTS_CTL);
+               if (status & HSFSTS_CTL_FCERR)
+                       ret = -EIO;
+               else if (status & HSFSTS_CTL_AEL)
+                       ret = -EACCES;
+
+               if (ret < 0) {
+                       dev_err(ispi->dev, "read error: %llx: %#x\n", from,
+                               status);
+                       return ret;
+               }
+
+               ret = intel_spi_read_block(ispi, read_buf, block_size);
+               if (ret)
+                       return ret;
+
+               len -= block_size;
+               from += block_size;
+               retlen += block_size;
+               read_buf += block_size;
+       }
+
+       return retlen;
+}
+
+/*
+ * intel_spi_write() - write data to the flash using the HW sequencer
+ * @nor: SPI NOR device (->priv holds our struct intel_spi)
+ * @to: flash offset to start writing at
+ * @len: number of bytes to write
+ * @write_buf: data to be written
+ *
+ * Splits the write into INTEL_SPI_FIFO_SZ sized chunks. For each chunk
+ * the flash address is programmed into FADDR, the data is loaded into
+ * the controller FIFO and the write cycle is started via HSFSTS_CTL_FGO.
+ *
+ * Returns the number of bytes written on success, negative errno on
+ * failure.
+ */
+static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
+                              const u_char *write_buf)
+{
+       struct intel_spi *ispi = nor->priv;
+       size_t block_size, retlen = 0;
+       u32 val, status;
+       ssize_t ret;
+
+       while (len > 0) {
+               block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
+
+               /* Tell the controller where this chunk goes in flash */
+               writel(to, ispi->base + FADDR);
+
+               /* Program byte count (FDBC is count - 1) and cycle type */
+               val = readl(ispi->base + HSFSTS_CTL);
+               val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
+               /* AEL/FCERR/FDONE written back, presumably W1C to clear stale status */
+               val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
+               val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
+               val |= HSFSTS_CTL_FCYCLE_WRITE;
+
+               /* Write enable */
+               /*
+                * NOTE(review): SSFSTS_CTL_SPOP/SSFSTS_CTL_ACS are SW
+                * sequencer control bits but are OR'ed into the HW
+                * sequencer HSFSTS_CTL register here - confirm against
+                * the chipset datasheet that this is intended.
+                */
+               if (ispi->preopcodes[1] == SPINOR_OP_WREN)
+                       val |= SSFSTS_CTL_SPOP;
+               val |= SSFSTS_CTL_ACS;
+               writel(val, ispi->base + HSFSTS_CTL);
+
+               /* Fill the FIFO before kicking off the cycle */
+               ret = intel_spi_write_block(ispi, write_buf, block_size);
+               if (ret) {
+                       dev_err(ispi->dev, "failed to write block\n");
+                       return ret;
+               }
+
+               /* Start the write now */
+               val = readl(ispi->base + HSFSTS_CTL);
+               writel(val | HSFSTS_CTL_FGO, ispi->base + HSFSTS_CTL);
+
+               ret = intel_spi_wait_hw_busy(ispi);
+               if (ret) {
+                       dev_err(ispi->dev, "timeout\n");
+                       return ret;
+               }
+
+               /* ret is 0 here; map error flags, if any, to an errno */
+               status = readl(ispi->base + HSFSTS_CTL);
+               if (status & HSFSTS_CTL_FCERR)
+                       ret = -EIO;
+               else if (status & HSFSTS_CTL_AEL)
+                       ret = -EACCES;
+
+               if (ret < 0) {
+                       dev_err(ispi->dev, "write error: %llx: %#x\n", to,
+                               status);
+                       return ret;
+               }
+
+               len -= block_size;
+               to += block_size;
+               retlen += block_size;
+               write_buf += block_size;
+       }
+
+       return retlen;
+}
+
+/*
+ * intel_spi_erase() - erase one MTD erase block using the HW sequencer
+ * @nor: SPI NOR device (->priv holds our struct intel_spi)
+ * @offs: flash offset of the block to erase
+ *
+ * Erases nor->mtd.erasesize bytes starting at @offs, using 64k erase
+ * cycles when the controller supports them and the block is large
+ * enough, 4k sector erase cycles otherwise.
+ *
+ * Returns 0 on success, negative errno on failure.
+ */
+static int intel_spi_erase(struct spi_nor *nor, loff_t offs)
+{
+       size_t erase_size, len = nor->mtd.erasesize;
+       struct intel_spi *ispi = nor->priv;
+       u32 val, status, cmd;
+       int ret;
+
+       /* If the hardware can do 64k erase use that when possible */
+       if (len >= SZ_64K && ispi->erase_64k) {
+               cmd = HSFSTS_CTL_FCYCLE_ERASE_64K;
+               erase_size = SZ_64K;
+       } else {
+               cmd = HSFSTS_CTL_FCYCLE_ERASE;
+               erase_size = SZ_4K;
+       }
+
+       while (len > 0) {
+               writel(offs, ispi->base + FADDR);
+
+               /* Select the erase cycle and kick it off (FGO) in one write */
+               val = readl(ispi->base + HSFSTS_CTL);
+               val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
+               /* AEL/FCERR/FDONE written back, presumably W1C to clear stale status */
+               val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
+               val |= cmd;
+               val |= HSFSTS_CTL_FGO;
+               writel(val, ispi->base + HSFSTS_CTL);
+
+               ret = intel_spi_wait_hw_busy(ispi);
+               if (ret)
+                       return ret;
+
+               status = readl(ispi->base + HSFSTS_CTL);
+               if (status & HSFSTS_CTL_FCERR)
+                       return -EIO;
+               else if (status & HSFSTS_CTL_AEL)
+                       return -EACCES;
+
+               offs += erase_size;
+               len -= erase_size;
+       }
+
+       return 0;
+}
+
+/*
+ * intel_spi_is_protected() - check if a flash range overlaps protection
+ * @ispi: the Intel SPI flash controller
+ * @base: start of the range, in the same units as the PR registers
+ * @limit: end of the range, in the same units as the PR registers
+ *
+ * Walks all protected range (PR) registers and returns true if any PR
+ * entry with its write (PR_WPE) or read (PR_RPE) protection bit set
+ * lies within [@base, @limit].
+ */
+static bool intel_spi_is_protected(const struct intel_spi *ispi,
+                                  unsigned int base, unsigned int limit)
+{
+       int i;
+
+       for (i = 0; i < PR_NUM; i++) {
+               u32 pr_base, pr_limit, pr_value;
+
+               pr_value = readl(ispi->pregs + PR(i));
+               /* Skip entries with neither write nor read protection enabled */
+               if (!(pr_value & (PR_WPE | PR_RPE)))
+                       continue;
+
+               pr_limit = (pr_value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
+               pr_base = pr_value & PR_BASE_MASK;
+
+               /* Protected range falls entirely inside the checked region */
+               if (pr_base >= base && pr_limit <= limit)
+                       return true;
+       }
+
+       return false;
+}
+
+/*
+ * There will be a single partition holding all enabled flash regions. We
+ * call this "BIOS".
+ */
+static void intel_spi_fill_partition(struct intel_spi *ispi,
+                                    struct mtd_partition *part)
+{
+       u64 end;
+       int i;
+
+       memset(part, 0, sizeof(*part));
+
+       /* Start from the mandatory descriptor region */
+       part->size = 4096;
+       part->name = "BIOS";
+
+       /*
+        * Now try to find where this partition ends based on the flash
+        * region registers.
+        */
+       for (i = 1; i < ispi->nregions; i++) {
+               u32 region, base, limit;
+
+               region = readl(ispi->base + FREG(i));
+               base = region & FREG_BASE_MASK;
+               limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;
+
+               /* Skip unused/disabled regions */
+               if (base >= limit || limit == 0)
+                       continue;
+
+               /*
+                * If any of the regions have protection bits set, make the
+                * whole partition read-only to be on the safe side.
+                */
+               if (intel_spi_is_protected(ispi, base, limit))
+                       ispi->writeable = 0;
+
+               /*
+                * Limits are in 4k units and inclusive, hence the +4096.
+                * NOTE(review): limit << 12 is evaluated in 32 bits before
+                * widening to u64 - confirm limit cannot exceed 20 bits.
+                */
+               end = (limit << 12) + 4096;
+               if (end > part->size)
+                       part->size = end;
+       }
+}
+
+/**
+ * intel_spi_probe() - probe the Intel SPI flash controller
+ * @dev: parent device
+ * @mem: MMIO resource of the controller registers
+ * @info: board specific information
+ *
+ * Maps the controller registers, initializes the hardware, scans the
+ * attached SPI NOR chip and registers a single "BIOS" MTD partition
+ * spanning all enabled flash regions.
+ *
+ * Returns the new driver context on success, an ERR_PTR() on failure.
+ * All resources are devm managed.
+ */
+struct intel_spi *intel_spi_probe(struct device *dev,
+       struct resource *mem, const struct intel_spi_boardinfo *info)
+{
+       struct mtd_partition part;
+       struct intel_spi *ispi;
+       int ret;
+
+       if (!info || !mem)
+               return ERR_PTR(-EINVAL);
+
+       ispi = devm_kzalloc(dev, sizeof(*ispi), GFP_KERNEL);
+       if (!ispi)
+               return ERR_PTR(-ENOMEM);
+
+       ispi->base = devm_ioremap_resource(dev, mem);
+       if (IS_ERR(ispi->base))
+               /* NOTE(review): returns the __iomem error pointer as-is; ERR_CAST() would be clearer */
+               return ispi->base;
+
+       ispi->dev = dev;
+       ispi->info = info;
+       ispi->writeable = info->writeable;
+
+       ret = intel_spi_init(ispi);
+       if (ret)
+               return ERR_PTR(ret);
+
+       /* Hook our implementations into the generic SPI NOR framework */
+       ispi->nor.dev = ispi->dev;
+       ispi->nor.priv = ispi;
+       ispi->nor.read_reg = intel_spi_read_reg;
+       ispi->nor.write_reg = intel_spi_write_reg;
+       ispi->nor.read = intel_spi_read;
+       ispi->nor.write = intel_spi_write;
+       ispi->nor.erase = intel_spi_erase;
+
+       ret = spi_nor_scan(&ispi->nor, NULL, SPI_NOR_NORMAL);
+       if (ret) {
+               dev_info(dev, "failed to locate the chip\n");
+               return ERR_PTR(ret);
+       }
+
+       intel_spi_fill_partition(ispi, &part);
+
+       /* Prevent writes if not explicitly enabled */
+       if (!ispi->writeable || !writeable)
+               ispi->nor.mtd.flags &= ~MTD_WRITEABLE;
+
+       ret = mtd_device_parse_register(&ispi->nor.mtd, NULL, NULL, &part, 1);
+       if (ret)
+               return ERR_PTR(ret);
+
+       return ispi;
+}
+EXPORT_SYMBOL_GPL(intel_spi_probe);
+
+/**
+ * intel_spi_remove() - remove the Intel SPI flash controller
+ * @ispi: driver context returned by intel_spi_probe()
+ *
+ * Unregisters the MTD device; everything else is devm managed.
+ */
+int intel_spi_remove(struct intel_spi *ispi)
+{
+       return mtd_device_unregister(&ispi->nor.mtd);
+}
+EXPORT_SYMBOL_GPL(intel_spi_remove);
+
+MODULE_DESCRIPTION("Intel PCH/PCU SPI flash core driver");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/spi-nor/intel-spi.h b/drivers/mtd/spi-nor/intel-spi.h
new file mode 100644 (file)
index 0000000..5ab7dc2
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * Intel PCH/PCU SPI flash driver.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef INTEL_SPI_H
+#define INTEL_SPI_H
+
+#include <linux/platform_data/intel-spi.h>
+
+struct intel_spi;
+struct resource;
+
+struct intel_spi *intel_spi_probe(struct device *dev,
+       struct resource *mem, const struct intel_spi_boardinfo *info);
+int intel_spi_remove(struct intel_spi *ispi);
+
+#endif /* INTEL_SPI_H */
index da7cd69d4857282d80c88b57e887294407ccc6e8..1ae872bfc3ba5be342993f4d176b5a2d4390da57 100644 (file)
@@ -75,6 +75,16 @@ struct flash_info {
                                         * bit. Must be used with
                                         * SPI_NOR_HAS_LOCK.
                                         */
+#define        SPI_S3AN                BIT(10) /*
+                                        * Xilinx Spartan 3AN In-System Flash
+                                        * (MFR cannot be used for probing
+                                        * because it has the same value as
+                                        * ATMEL flashes)
+                                        */
+#define SPI_NOR_4B_OPCODES     BIT(11) /*
+                                        * Use dedicated 4byte address op codes
+                                        * to support memory size above 128Mib.
+                                        */
 };
 
 #define JEDEC_MFR(info)        ((info)->id[0])
@@ -122,7 +132,7 @@ static int read_fsr(struct spi_nor *nor)
 /*
  * Read configuration register, returning its value in the
  * location. Return the configuration register value.
- * Returns negative if error occured.
+ * Returns negative if error occurred.
  */
 static int read_cr(struct spi_nor *nor)
 {
@@ -188,6 +198,78 @@ static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
        return mtd->priv;
 }
 
+
+/*
+ * spi_nor_convert_opcode() - map an op code through a conversion table
+ * @opcode: op code to convert
+ * @table: array of {from, to} op code pairs
+ * @size: number of entries in @table
+ *
+ * Returns the converted op code, or @opcode unchanged when the table
+ * has no entry for it.
+ */
+static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
+{
+       size_t i;
+
+       for (i = 0; i < size; i++)
+               if (table[i][0] == opcode)
+                       return table[i][1];
+
+       /* No conversion found, keep input op code. */
+       return opcode;
+}
+
+/* Convert a 3-byte addressing read op code to its dedicated 4-byte variant. */
+static inline u8 spi_nor_convert_3to4_read(u8 opcode)
+{
+       static const u8 spi_nor_3to4_read[][2] = {
+               { SPINOR_OP_READ,       SPINOR_OP_READ_4B },
+               { SPINOR_OP_READ_FAST,  SPINOR_OP_READ_FAST_4B },
+               { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
+               { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
+               { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
+               { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
+       };
+
+       return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
+                                     ARRAY_SIZE(spi_nor_3to4_read));
+}
+
+/* Convert a 3-byte addressing program op code to its dedicated 4-byte variant. */
+static inline u8 spi_nor_convert_3to4_program(u8 opcode)
+{
+       static const u8 spi_nor_3to4_program[][2] = {
+               { SPINOR_OP_PP,         SPINOR_OP_PP_4B },
+               { SPINOR_OP_PP_1_1_4,   SPINOR_OP_PP_1_1_4_4B },
+               { SPINOR_OP_PP_1_4_4,   SPINOR_OP_PP_1_4_4_4B },
+       };
+
+       return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
+                                     ARRAY_SIZE(spi_nor_3to4_program));
+}
+
+/* Convert a 3-byte addressing erase op code to its dedicated 4-byte variant. */
+static inline u8 spi_nor_convert_3to4_erase(u8 opcode)
+{
+       static const u8 spi_nor_3to4_erase[][2] = {
+               { SPINOR_OP_BE_4K,      SPINOR_OP_BE_4K_4B },
+               { SPINOR_OP_BE_32K,     SPINOR_OP_BE_32K_4B },
+               { SPINOR_OP_SE,         SPINOR_OP_SE_4B },
+       };
+
+       return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
+                                     ARRAY_SIZE(spi_nor_3to4_erase));
+}
+
+/*
+ * spi_nor_set_4byte_opcodes() - switch to the dedicated 4-byte op codes
+ * @nor: the SPI NOR device
+ * @info: flash chip information
+ *
+ * Replaces the currently selected read/program/erase op codes with
+ * their 4-byte address counterparts, applying manufacturer specific
+ * fixups first.
+ */
+static void spi_nor_set_4byte_opcodes(struct spi_nor *nor,
+                                     const struct flash_info *info)
+{
+       /* Do some manufacturer fixups first */
+       switch (JEDEC_MFR(info)) {
+       case SNOR_MFR_SPANSION:
+               /* No small sector erase for 4-byte command set */
+               nor->erase_opcode = SPINOR_OP_SE;
+               nor->mtd.erasesize = info->sector_size;
+               break;
+
+       default:
+               break;
+       }
+
+       nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
+       nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
+       nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
+}
+
 /* Enable/disable 4-byte addressing mode. */
 static inline int set_4byte(struct spi_nor *nor, const struct flash_info *info,
                            int enable)
@@ -217,6 +299,21 @@ static inline int set_4byte(struct spi_nor *nor, const struct flash_info *info,
                return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1);
        }
 }
+
+/*
+ * s3an_sr_ready() - check Xilinx S3AN flash readiness via XRDSR
+ * @nor: the SPI NOR device
+ *
+ * Returns 1 when the device reports ready (XSR_RDY set), 0 when busy,
+ * negative errno if reading the register fails.
+ */
+static int s3an_sr_ready(struct spi_nor *nor)
+{
+       int ret;
+       u8 val;
+
+       ret = nor->read_reg(nor, SPINOR_OP_XRDSR, &val, 1);
+       if (ret < 0) {
+               dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret);
+               return ret;
+       }
+
+       return !!(val & XSR_RDY);
+}
+
 static inline int spi_nor_sr_ready(struct spi_nor *nor)
 {
        int sr = read_sr(nor);
@@ -238,7 +335,11 @@ static inline int spi_nor_fsr_ready(struct spi_nor *nor)
 static int spi_nor_ready(struct spi_nor *nor)
 {
        int sr, fsr;
-       sr = spi_nor_sr_ready(nor);
+
+       if (nor->flags & SNOR_F_READY_XSR_RDY)
+               sr = s3an_sr_ready(nor);
+       else
+               sr = spi_nor_sr_ready(nor);
        if (sr < 0)
                return sr;
        fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
@@ -319,6 +420,27 @@ static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
        mutex_unlock(&nor->lock);
 }
 
+/*
+ * This code converts an address to the Default Address Mode, that has non
+ * power of two page sizes. We must support this mode because it is the default
+ * mode supported by Xilinx tools, it can access the whole flash area and
+ * changing over to the Power-of-two mode is irreversible and corrupts the
+ * original data.
+ * Addr can safely be unsigned int, the biggest S3AN device is smaller than
+ * 4 MiB.
+ */
+static loff_t spi_nor_s3an_addr_convert(struct spi_nor *nor, unsigned int addr)
+{
+       unsigned int offset;
+       unsigned int page;
+
+       /* Split the linear address into page number and in-page offset */
+       offset = addr % nor->page_size;
+       page = addr / nor->page_size;
+       /* Page number occupies bit 10+ for 528-byte pages, bit 9+ for 264-byte pages */
+       page <<= (nor->page_size > 512) ? 10 : 9;
+
+       return page | offset;
+}
+
 /*
  * Initiate the erasure of a single sector
  */
@@ -327,6 +449,9 @@ static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
        u8 buf[SPI_NOR_MAX_ADDR_WIDTH];
        int i;
 
+       if (nor->flags & SNOR_F_S3AN_ADDR_DEFAULT)
+               addr = spi_nor_s3an_addr_convert(nor, addr);
+
        if (nor->erase)
                return nor->erase(nor, addr);
 
@@ -368,7 +493,7 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
                return ret;
 
        /* whole-chip erase? */
-       if (len == mtd->size) {
+       if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
                unsigned long timeout;
 
                write_enable(nor);
@@ -782,6 +907,19 @@ static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
                .addr_width = (_addr_width),                            \
                .flags = (_flags),
 
+#define S3AN_INFO(_jedec_id, _n_sectors, _page_size)                   \
+               .id = {                                                 \
+                       ((_jedec_id) >> 16) & 0xff,                     \
+                       ((_jedec_id) >> 8) & 0xff,                      \
+                       (_jedec_id) & 0xff                              \
+                       },                                              \
+               .id_len = 3,                                            \
+               .sector_size = (8*_page_size),                          \
+               .n_sectors = (_n_sectors),                              \
+               .page_size = _page_size,                                \
+               .addr_width = 3,                                        \
+               .flags = SPI_NOR_NO_FR | SPI_S3AN,
+
 /* NOTE: double check command sets and memory organization when you add
  * more nor chips.  This current list focusses on newer chips, which
  * have been converging on command sets which including JEDEC ID.
@@ -821,7 +959,7 @@ static const struct flash_info spi_nor_ids[] = {
        { "en25s64",    INFO(0x1c3817, 0, 64 * 1024,  128, SECT_4K) },
 
        /* ESMT */
-       { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K) },
+       { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
 
        /* Everspin */
        { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
@@ -832,6 +970,11 @@ static const struct flash_info spi_nor_ids[] = {
        { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },
 
        /* GigaDevice */
+       {
+               "gd25q16", INFO(0xc84015, 0, 64 * 1024,  32,
+                       SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+                       SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+       },
        {
                "gd25q32", INFO(0xc84016, 0, 64 * 1024,  64,
                        SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
@@ -1014,6 +1157,13 @@ static const struct flash_info spi_nor_ids[] = {
        { "cat25c09", CAT25_INFO( 128, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
        { "cat25c17", CAT25_INFO( 256, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
        { "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+
+       /* Xilinx S3AN Internal Flash */
+       { "3S50AN", S3AN_INFO(0x1f2200, 64, 264) },
+       { "3S200AN", S3AN_INFO(0x1f2400, 256, 264) },
+       { "3S400AN", S3AN_INFO(0x1f2400, 256, 264) },
+       { "3S700AN", S3AN_INFO(0x1f2500, 512, 264) },
+       { "3S1400AN", S3AN_INFO(0x1f2600, 512, 528) },
        { },
 };
 
@@ -1054,7 +1204,12 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
                return ret;
 
        while (len) {
-               ret = nor->read(nor, from, len, buf);
+               loff_t addr = from;
+
+               if (nor->flags & SNOR_F_S3AN_ADDR_DEFAULT)
+                       addr = spi_nor_s3an_addr_convert(nor, addr);
+
+               ret = nor->read(nor, addr, len, buf);
                if (ret == 0) {
                        /* We shouldn't see 0-length reads */
                        ret = -EIO;
@@ -1175,17 +1330,32 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
 
        for (i = 0; i < len; ) {
                ssize_t written;
+               loff_t addr = to + i;
+
+               /*
+                * If page_size is a power of two, the offset can be quickly
+                * calculated with an AND operation. On the other cases we
+                * need to do a modulus operation (more expensive).
+                * Power of two numbers have only one bit set and we can use
+                * the instruction hweight32 to detect if we need to do a
+                * modulus (do_div()) or not.
+                */
+               if (hweight32(nor->page_size) == 1) {
+                       page_offset = addr & (nor->page_size - 1);
+               } else {
+                       uint64_t aux = addr;
 
-               page_offset = (to + i) & (nor->page_size - 1);
-               WARN_ONCE(page_offset,
-                         "Writing at offset %zu into a NOR page. Writing partial pages may decrease reliability and increase wear of NOR flash.",
-                         page_offset);
+                       page_offset = do_div(aux, nor->page_size);
+               }
                /* the size of data remaining on the first page */
                page_remain = min_t(size_t,
                                    nor->page_size - page_offset, len - i);
 
+               if (nor->flags & SNOR_F_S3AN_ADDR_DEFAULT)
+                       addr = spi_nor_s3an_addr_convert(nor, addr);
+
                write_enable(nor);
-               ret = nor->write(nor, to + i, page_remain, buf + i);
+               ret = nor->write(nor, addr, page_remain, buf + i);
                if (ret < 0)
                        goto write_err;
                written = ret;
@@ -1216,6 +1386,9 @@ static int macronix_quad_enable(struct spi_nor *nor)
        val = read_sr(nor);
        if (val < 0)
                return val;
+       if (val & SR_QUAD_EN_MX)
+               return 0;
+
        write_enable(nor);
 
        write_sr(nor, val | SR_QUAD_EN_MX);
@@ -1236,7 +1409,7 @@ static int macronix_quad_enable(struct spi_nor *nor)
  * Write status Register and configuration register with 2 bytes
  * The first byte will be written to the status register, while the
  * second byte will be written to the configuration register.
- * Return negative if error occured.
+ * Return negative if error occurred.
  */
 static int write_sr_cr(struct spi_nor *nor, u16 val)
 {
@@ -1312,6 +1485,47 @@ static int spi_nor_check(struct spi_nor *nor)
        return 0;
 }
 
+/*
+ * s3an_nor_scan() - Xilinx S3AN specific setup
+ * @info: flash chip information
+ * @nor: the SPI NOR device
+ *
+ * Sets the S3AN specific op codes, then reads the XRDSR register to
+ * detect whether the chip runs in Power-of-2 or Default addressing
+ * mode and adjusts the geometry accordingly.
+ *
+ * Returns 0 on success, negative errno on failure.
+ */
+static int s3an_nor_scan(const struct flash_info *info, struct spi_nor *nor)
+{
+       int ret;
+       u8 val;
+
+       ret = nor->read_reg(nor, SPINOR_OP_XRDSR, &val, 1);
+       if (ret < 0) {
+               dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret);
+               return ret;
+       }
+
+       nor->erase_opcode = SPINOR_OP_XSE;
+       nor->program_opcode = SPINOR_OP_XPP;
+       nor->read_opcode = SPINOR_OP_READ;
+       nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
+
+       /*
+        * These flashes have a page size of 264 or 528 bytes (known as
+        * Default addressing mode). It can be changed to a more standard
+        * Power of two mode where the page size is 256/512. This comes
+        * with a price: there is 3% less of space, the data is corrupted
+        * and the page size cannot be changed back to default addressing
+        * mode.
+        *
+        * The current addressing mode can be read from the XRDSR register
+        * and should not be changed, because it is a destructive operation.
+        */
+       if (val & XSR_PAGESIZE) {
+               /* Flash in Power of 2 mode */
+               nor->page_size = (nor->page_size == 264) ? 256 : 512;
+               nor->mtd.writebufsize = nor->page_size;
+               nor->mtd.size = 8 * nor->page_size * info->n_sectors;
+               nor->mtd.erasesize = 8 * nor->page_size;
+       } else {
+               /* Flash in Default addressing mode */
+               nor->flags |= SNOR_F_S3AN_ADDR_DEFAULT;
+       }
+
+       return 0;
+}
+
 int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
 {
        const struct flash_info *info = NULL;
@@ -1359,6 +1573,14 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
 
        mutex_init(&nor->lock);
 
+       /*
+        * Make sure the XSR_RDY flag is set before calling
+        * spi_nor_wait_till_ready(). Xilinx S3AN shares its MFR ID
+        * with Atmel SPI NOR flashes.
+        */
+       if (info->flags & SPI_S3AN)
+               nor->flags |=  SNOR_F_READY_XSR_RDY;
+
        /*
         * Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up
         * with the software protection bits set
@@ -1483,27 +1705,10 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
        else if (mtd->size > 0x1000000) {
                /* enable 4-byte addressing if the device exceeds 16MiB */
                nor->addr_width = 4;
-               if (JEDEC_MFR(info) == SNOR_MFR_SPANSION) {
-                       /* Dedicated 4-byte command set */
-                       switch (nor->flash_read) {
-                       case SPI_NOR_QUAD:
-                               nor->read_opcode = SPINOR_OP_READ4_1_1_4;
-                               break;
-                       case SPI_NOR_DUAL:
-                               nor->read_opcode = SPINOR_OP_READ4_1_1_2;
-                               break;
-                       case SPI_NOR_FAST:
-                               nor->read_opcode = SPINOR_OP_READ4_FAST;
-                               break;
-                       case SPI_NOR_NORMAL:
-                               nor->read_opcode = SPINOR_OP_READ4;
-                               break;
-                       }
-                       nor->program_opcode = SPINOR_OP_PP_4B;
-                       /* No small sector erase for 4-byte command set */
-                       nor->erase_opcode = SPINOR_OP_SE_4B;
-                       mtd->erasesize = info->sector_size;
-               } else
+               if (JEDEC_MFR(info) == SNOR_MFR_SPANSION ||
+                   info->flags & SPI_NOR_4B_OPCODES)
+                       spi_nor_set_4byte_opcodes(nor, info);
+               else
                        set_4byte(nor, info, 1);
        } else {
                nor->addr_width = 3;
@@ -1517,6 +1722,12 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
 
        nor->read_dummy = spi_nor_read_dummy_cycles(nor);
 
+       if (info->flags & SPI_S3AN) {
+               ret = s3an_nor_scan(info, nor);
+               if (ret)
+                       return ret;
+       }
+
        dev_info(dev, "%s (%lld Kbytes)\n", info->name,
                        (long long)mtd->size >> 10);
 
index 7be393c96b1a0481c1f181443a7b521b647378f5..cf7c18947189a1f5c2834ad5b0d14ac615686294 100644 (file)
@@ -161,6 +161,7 @@ static int c_can_pci_probe(struct pci_dev *pdev,
 
        dev->irq = pdev->irq;
        priv->base = addr;
+       priv->device = &pdev->dev;
 
        if (!c_can_pci_data->freq) {
                dev_err(&pdev->dev, "no clock frequency defined\n");
index 680d1ff07a55ddd60ceb09eb42bb98faaa42ad9f..6749b1829469411315dedac1634b7be974cf21d8 100644 (file)
@@ -948,7 +948,12 @@ static int ti_hecc_probe(struct platform_device *pdev)
        netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll,
                HECC_DEF_NAPI_WEIGHT);
 
-       clk_enable(priv->clk);
+       err = clk_prepare_enable(priv->clk);
+       if (err) {
+               dev_err(&pdev->dev, "clk_prepare_enable() failed\n");
+               goto probe_exit_clk;
+       }
+
        err = register_candev(ndev);
        if (err) {
                dev_err(&pdev->dev, "register_candev() failed\n");
@@ -981,7 +986,7 @@ static int ti_hecc_remove(struct platform_device *pdev)
        struct ti_hecc_priv *priv = netdev_priv(ndev);
 
        unregister_candev(ndev);
-       clk_disable(priv->clk);
+       clk_disable_unprepare(priv->clk);
        clk_put(priv->clk);
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        iounmap(priv->base);
@@ -1006,7 +1011,7 @@ static int ti_hecc_suspend(struct platform_device *pdev, pm_message_t state)
        hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
        priv->can.state = CAN_STATE_SLEEPING;
 
-       clk_disable(priv->clk);
+       clk_disable_unprepare(priv->clk);
 
        return 0;
 }
@@ -1015,8 +1020,11 @@ static int ti_hecc_resume(struct platform_device *pdev)
 {
        struct net_device *dev = platform_get_drvdata(pdev);
        struct ti_hecc_priv *priv = netdev_priv(dev);
+       int err;
 
-       clk_enable(priv->clk);
+       err = clk_prepare_enable(priv->clk);
+       if (err)
+               return err;
 
        hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
        priv->can.state = CAN_STATE_ERROR_ACTIVE;
index c12d2618eebf76397b2e71eaeb6f2fafa938fea6..3872ab96b80a39eecbb1d0b8150a2e8288915e46 100644 (file)
@@ -1152,6 +1152,12 @@ static void init_ring(struct net_device *dev)
                if (skb == NULL)
                        break;
                np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+               if (pci_dma_mapping_error(np->pci_dev,
+                                         np->rx_info[i].mapping)) {
+                       dev_kfree_skb(skb);
+                       np->rx_info[i].skb = NULL;
+                       break;
+               }
                /* Grrr, we cannot offset to correctly align the IP header. */
                np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
        }
@@ -1182,8 +1188,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 {
        struct netdev_private *np = netdev_priv(dev);
        unsigned int entry;
+       unsigned int prev_tx;
        u32 status;
-       int i;
+       int i, j;
 
        /*
         * be cautious here, wrapping the queue has weird semantics
@@ -1201,6 +1208,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
        }
 #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
 
+       prev_tx = np->cur_tx;
        entry = np->cur_tx % TX_RING_SIZE;
        for (i = 0; i < skb_num_frags(skb); i++) {
                int wrap_ring = 0;
@@ -1234,6 +1242,11 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
                                               skb_frag_size(this_frag),
                                               PCI_DMA_TODEVICE);
                }
+               if (pci_dma_mapping_error(np->pci_dev,
+                                         np->tx_info[entry].mapping)) {
+                       dev->stats.tx_dropped++;
+                       goto err_out;
+               }
 
                np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
                np->tx_ring[entry].status = cpu_to_le32(status);
@@ -1268,8 +1281,30 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
                netif_stop_queue(dev);
 
        return NETDEV_TX_OK;
-}
 
+err_out:
+       entry = prev_tx % TX_RING_SIZE;
+       np->tx_info[entry].skb = NULL;
+       if (i > 0) {
+               pci_unmap_single(np->pci_dev,
+                                np->tx_info[entry].mapping,
+                                skb_first_frag_len(skb),
+                                PCI_DMA_TODEVICE);
+               np->tx_info[entry].mapping = 0;
+               entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
+               for (j = 1; j < i; j++) {
+                       pci_unmap_single(np->pci_dev,
+                                        np->tx_info[entry].mapping,
+                                        skb_frag_size(
+                                               &skb_shinfo(skb)->frags[j-1]),
+                                        PCI_DMA_TODEVICE);
+                       entry++;
+               }
+       }
+       dev_kfree_skb_any(skb);
+       np->cur_tx = prev_tx;
+       return NETDEV_TX_OK;
+}
 
 /* The interrupt handler does all of the Rx thread work and cleans up
    after the Tx thread. */
@@ -1569,6 +1604,12 @@ static void refill_rx_ring(struct net_device *dev)
                                break;  /* Better luck next round. */
                        np->rx_info[entry].mapping =
                                pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+                       if (pci_dma_mapping_error(np->pci_dev,
+                                               np->rx_info[entry].mapping)) {
+                               dev_kfree_skb(skb);
+                               np->rx_info[entry].skb = NULL;
+                               break;
+                       }
                        np->rx_ring[entry].rxaddr =
                                cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
                }
index 5b7ba25e006522a6143d6c73a9b8141a7ac76f85..8a280e7d66bddc998763288a5756b2ae6a7f70bc 100644 (file)
 #define PCS_V1_WINDOW_SELECT           0x03fc
 #define PCS_V2_WINDOW_DEF              0x9060
 #define PCS_V2_WINDOW_SELECT           0x9064
+#define PCS_V2_RV_WINDOW_DEF           0x1060
+#define PCS_V2_RV_WINDOW_SELECT                0x1064
 
 /* PCS register entry bit positions and sizes */
 #define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6
index aaf0350076a90a1dcd91502b9ce8e4a81cff7174..a7d16db5c4b21d8f9d80d9801259419da8def379 100644 (file)
@@ -1151,7 +1151,7 @@ static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
        offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
 
        spin_lock_irqsave(&pdata->xpcs_lock, flags);
-       XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index);
+       XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
        mmd_data = XPCS16_IOREAD(pdata, offset);
        spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
 
@@ -1183,7 +1183,7 @@ static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
        offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
 
        spin_lock_irqsave(&pdata->xpcs_lock, flags);
-       XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index);
+       XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
        XPCS16_IOWRITE(pdata, offset, mmd_data);
        spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
 }
@@ -3407,8 +3407,10 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
 
        /* Flush Tx queues */
        ret = xgbe_flush_tx_queues(pdata);
-       if (ret)
+       if (ret) {
+               netdev_err(pdata->netdev, "error flushing TX queues\n");
                return ret;
+       }
 
        /*
         * Initialize DMA related features
index 9943629fcbf9ae14a9683e0b2eb0da459f83c0c6..1c87cc20407590fc84710c9ecb4f8047cf811797 100644 (file)
@@ -1070,7 +1070,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
 
        DBGPR("-->xgbe_start\n");
 
-       hw_if->init(pdata);
+       ret = hw_if->init(pdata);
+       if (ret)
+               return ret;
 
        xgbe_napi_enable(pdata, 1);
 
index e76b7f65b805171ca81945efb75a44c3c1d7242d..c2730f15bd8b62d2e0487e4eef11f6518a476c39 100644 (file)
@@ -265,6 +265,7 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        struct xgbe_prv_data *pdata;
        struct device *dev = &pdev->dev;
        void __iomem * const *iomap_table;
+       struct pci_dev *rdev;
        unsigned int ma_lo, ma_hi;
        unsigned int reg;
        int bar_mask;
@@ -326,8 +327,20 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (netif_msg_probe(pdata))
                dev_dbg(dev, "xpcs_regs  = %p\n", pdata->xpcs_regs);
 
+       /* Set the PCS indirect addressing definition registers */
+       rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
+       if (rdev &&
+           (rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) {
+               pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
+               pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
+       } else {
+               pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
+               pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
+       }
+       pci_dev_put(rdev);
+
        /* Configure the PCS indirect addressing support */
-       reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF);
+       reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
        pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
        pdata->xpcs_window <<= 6;
        pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
index f52a9bd05baca559d7afd6e1184010d103e1c08a..00108815b55eeb1417bd83bea4c9fbb7aa101fe1 100644 (file)
@@ -955,6 +955,8 @@ struct xgbe_prv_data {
 
        /* XPCS indirect addressing lock */
        spinlock_t xpcs_lock;
+       unsigned int xpcs_window_def_reg;
+       unsigned int xpcs_window_sel_reg;
        unsigned int xpcs_window;
        unsigned int xpcs_window_size;
        unsigned int xpcs_window_mask;
index c8f525574d68c67cbed2899c8461ace4606a7515..7dcc907a449d60adcbeb27cf58434edb0442792b 100644 (file)
@@ -685,8 +685,6 @@ static int alx_alloc_rings(struct alx_priv *alx)
                return -ENOMEM;
        }
 
-       alx_reinit_rings(alx);
-
        return 0;
 }
 
@@ -703,7 +701,7 @@ static void alx_free_rings(struct alx_priv *alx)
        if (alx->qnapi[0] && alx->qnapi[0]->rxq)
                kfree(alx->qnapi[0]->rxq->bufs);
 
-       if (!alx->descmem.virt)
+       if (alx->descmem.virt)
                dma_free_coherent(&alx->hw.pdev->dev,
                                  alx->descmem.size,
                                  alx->descmem.virt,
@@ -984,6 +982,7 @@ static int alx_realloc_resources(struct alx_priv *alx)
        alx_free_rings(alx);
        alx_free_napis(alx);
        alx_disable_advanced_intr(alx);
+       alx_init_intr(alx, false);
 
        err = alx_alloc_napis(alx);
        if (err)
@@ -1241,6 +1240,12 @@ static int __alx_open(struct alx_priv *alx, bool resume)
        if (err)
                goto out_free_rings;
 
+       /* must be called after alx_request_irq because the chip stops working
+        * if we copy the dma addresses in alx_init_ring_ptrs twice when
+        * requesting msi-x interrupts failed
+        */
+       alx_reinit_rings(alx);
+
        netif_set_real_num_tx_queues(alx->dev, alx->num_txq);
        netif_set_real_num_rx_queues(alx->dev, alx->num_rxq);
 
index 3b14d51442280b8a399b0d9b7145bebfc1560597..c483618b57bd7ef93f8522a91814a5dd9d9b0eed 100644 (file)
@@ -913,6 +913,8 @@ static int bcm_enet_open(struct net_device *dev)
                priv->old_link = 0;
                priv->old_duplex = -1;
                priv->old_pause = -1;
+       } else {
+               phydev = NULL;
        }
 
        /* mask all interrupts and request them */
@@ -1083,7 +1085,7 @@ static int bcm_enet_open(struct net_device *dev)
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IRMASK, priv->tx_chan);
 
-       if (priv->has_phy)
+       if (phydev)
                phy_start(phydev);
        else
                bcm_enet_adjust_link(dev);
@@ -1126,7 +1128,7 @@ out_freeirq:
        free_irq(dev->irq, dev);
 
 out_phy_disconnect:
-       if (priv->has_phy)
+       if (phydev)
                phy_disconnect(phydev);
 
        return ret;
index 9608cb49a11ca3d40c9d497bad10bf94d42433ad..4fcc6a84a087974e5d73042ce1af98268f59bcde 100644 (file)
@@ -1099,7 +1099,7 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
 {
 #ifdef CONFIG_INET
        struct tcphdr *th;
-       int len, nw_off, tcp_opt_len;
+       int len, nw_off, tcp_opt_len = 0;
 
        if (tcp_ts)
                tcp_opt_len = 12;
@@ -5314,17 +5314,12 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
        if ((link_info->support_auto_speeds | diff) !=
            link_info->support_auto_speeds) {
                /* An advertised speed is no longer supported, so we need to
-                * update the advertisement settings.  See bnxt_reset() for
-                * comments about the rtnl_lock() sequence below.
+                * update the advertisement settings.  Caller holds RTNL
+                * so we can modify link settings.
                 */
-               clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
-               rtnl_lock();
                link_info->advertising = link_info->support_auto_speeds;
-               if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
-                   (link_info->autoneg & BNXT_AUTONEG_SPEED))
+               if (link_info->autoneg & BNXT_AUTONEG_SPEED)
                        bnxt_hwrm_set_link_setting(bp, true, false);
-               set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
-               rtnl_unlock();
        }
        return 0;
 }
@@ -6200,29 +6195,37 @@ bnxt_restart_timer:
        mod_timer(&bp->timer, jiffies + bp->current_interval);
 }
 
-/* Only called from bnxt_sp_task() */
-static void bnxt_reset(struct bnxt *bp, bool silent)
+static void bnxt_rtnl_lock_sp(struct bnxt *bp)
 {
-       /* bnxt_reset_task() calls bnxt_close_nic() which waits
-        * for BNXT_STATE_IN_SP_TASK to clear.
-        * If there is a parallel dev_close(), bnxt_close() may be holding
+       /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
+        * set.  If the device is being closed, bnxt_close() may be holding
         * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
         * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
         */
        clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
        rtnl_lock();
-       if (test_bit(BNXT_STATE_OPEN, &bp->state))
-               bnxt_reset_task(bp, silent);
+}
+
+static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
+{
        set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
        rtnl_unlock();
 }
 
+/* Only called from bnxt_sp_task() */
+static void bnxt_reset(struct bnxt *bp, bool silent)
+{
+       bnxt_rtnl_lock_sp(bp);
+       if (test_bit(BNXT_STATE_OPEN, &bp->state))
+               bnxt_reset_task(bp, silent);
+       bnxt_rtnl_unlock_sp(bp);
+}
+
 static void bnxt_cfg_ntp_filters(struct bnxt *);
 
 static void bnxt_sp_task(struct work_struct *work)
 {
        struct bnxt *bp = container_of(work, struct bnxt, sp_task);
-       int rc;
 
        set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
        smp_mb__after_atomic();
@@ -6236,16 +6239,6 @@ static void bnxt_sp_task(struct work_struct *work)
 
        if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
                bnxt_cfg_ntp_filters(bp);
-       if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
-               if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
-                                      &bp->sp_event))
-                       bnxt_hwrm_phy_qcaps(bp);
-
-               rc = bnxt_update_link(bp, true);
-               if (rc)
-                       netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
-                                  rc);
-       }
        if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
                bnxt_hwrm_exec_fwd_req(bp);
        if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
@@ -6266,18 +6259,39 @@ static void bnxt_sp_task(struct work_struct *work)
                bnxt_hwrm_tunnel_dst_port_free(
                        bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
        }
+       if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
+               bnxt_hwrm_port_qstats(bp);
+
+       /* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
+        * must be the last functions to be called before exiting.
+        */
+       if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
+               int rc = 0;
+
+               if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
+                                      &bp->sp_event))
+                       bnxt_hwrm_phy_qcaps(bp);
+
+               bnxt_rtnl_lock_sp(bp);
+               if (test_bit(BNXT_STATE_OPEN, &bp->state))
+                       rc = bnxt_update_link(bp, true);
+               bnxt_rtnl_unlock_sp(bp);
+               if (rc)
+                       netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
+                                  rc);
+       }
+       if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
+               bnxt_rtnl_lock_sp(bp);
+               if (test_bit(BNXT_STATE_OPEN, &bp->state))
+                       bnxt_get_port_module_status(bp);
+               bnxt_rtnl_unlock_sp(bp);
+       }
        if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
                bnxt_reset(bp, false);
 
        if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
                bnxt_reset(bp, true);
 
-       if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event))
-               bnxt_get_port_module_status(bp);
-
-       if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
-               bnxt_hwrm_port_qstats(bp);
-
        smp_mb__before_atomic();
        clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
 }
index c0fb80acc2dad4b91d3b3cb8198be50dc9597fbd..baba2db9d9c25988da94cb323e5d1a6832a12b51 100644 (file)
 #define DEFAULT_RX_RING_SIZE   512 /* must be power of 2 */
 #define MIN_RX_RING_SIZE       64
 #define MAX_RX_RING_SIZE       8192
-#define RX_RING_BYTES(bp)      (sizeof(struct macb_dma_desc)   \
+#define RX_RING_BYTES(bp)      (macb_dma_desc_get_size(bp)     \
                                 * (bp)->rx_ring_size)
 
 #define DEFAULT_TX_RING_SIZE   512 /* must be power of 2 */
 #define MIN_TX_RING_SIZE       64
 #define MAX_TX_RING_SIZE       4096
-#define TX_RING_BYTES(bp)      (sizeof(struct macb_dma_desc)   \
+#define TX_RING_BYTES(bp)      (macb_dma_desc_get_size(bp)     \
                                 * (bp)->tx_ring_size)
 
 /* level of occupied TX descriptors under which we wake up TX process */
  */
 #define MACB_HALT_TIMEOUT      1230
 
+/* DMA buffer descriptor might be different size
+ * depends on hardware configuration.
+ */
+static unsigned int macb_dma_desc_get_size(struct macb *bp)
+{
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+       if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+               return sizeof(struct macb_dma_desc) + sizeof(struct macb_dma_desc_64);
+#endif
+       return sizeof(struct macb_dma_desc);
+}
+
+static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int idx)
+{
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+       /* Dma buffer descriptor is 4 words length (instead of 2 words)
+        * for 64b GEM.
+        */
+       if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+               idx <<= 1;
+#endif
+       return idx;
+}
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
+{
+       return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
+}
+#endif
+
 /* Ring buffer accessors */
 static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
 {
@@ -87,7 +118,9 @@ static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
                                          unsigned int index)
 {
-       return &queue->tx_ring[macb_tx_ring_wrap(queue->bp, index)];
+       index = macb_tx_ring_wrap(queue->bp, index);
+       index = macb_adj_dma_desc_idx(queue->bp, index);
+       return &queue->tx_ring[index];
 }
 
 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
@@ -101,7 +134,7 @@ static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
        dma_addr_t offset;
 
        offset = macb_tx_ring_wrap(queue->bp, index) *
-                sizeof(struct macb_dma_desc);
+                       macb_dma_desc_get_size(queue->bp);
 
        return queue->tx_ring_dma + offset;
 }
@@ -113,7 +146,9 @@ static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
 
 static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
 {
-       return &bp->rx_ring[macb_rx_ring_wrap(bp, index)];
+       index = macb_rx_ring_wrap(bp, index);
+       index = macb_adj_dma_desc_idx(bp, index);
+       return &bp->rx_ring[index];
 }
 
 static void *macb_rx_buffer(struct macb *bp, unsigned int index)
@@ -560,12 +595,32 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
        }
 }
 
-static inline void macb_set_addr(struct macb_dma_desc *desc, dma_addr_t addr)
+static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
 {
-       desc->addr = (u32)addr;
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-       desc->addrh = (u32)(addr >> 32);
+       struct macb_dma_desc_64 *desc_64;
+
+       if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
+               desc_64 = macb_64b_desc(bp, desc);
+               desc_64->addrh = upper_32_bits(addr);
+       }
 #endif
+       desc->addr = lower_32_bits(addr);
+}
+
+static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
+{
+       dma_addr_t addr = 0;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+       struct macb_dma_desc_64 *desc_64;
+
+       if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
+               desc_64 = macb_64b_desc(bp, desc);
+               addr = ((u64)(desc_64->addrh) << 32);
+       }
+#endif
+       addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
+       return addr;
 }
 
 static void macb_tx_error_task(struct work_struct *work)
@@ -649,16 +704,17 @@ static void macb_tx_error_task(struct work_struct *work)
 
        /* Set end of TX queue */
        desc = macb_tx_desc(queue, 0);
-       macb_set_addr(desc, 0);
+       macb_set_addr(bp, desc, 0);
        desc->ctrl = MACB_BIT(TX_USED);
 
        /* Make descriptor updates visible to hardware */
        wmb();
 
        /* Reinitialize the TX desc queue */
-       queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
+       queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-       queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
+       if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+               queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
 #endif
        /* Make TX ring reflect state of hardware */
        queue->tx_head = 0;
@@ -750,6 +806,7 @@ static void gem_rx_refill(struct macb *bp)
        unsigned int            entry;
        struct sk_buff          *skb;
        dma_addr_t              paddr;
+       struct macb_dma_desc *desc;
 
        while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
                          bp->rx_ring_size) > 0) {
@@ -759,6 +816,7 @@ static void gem_rx_refill(struct macb *bp)
                rmb();
 
                bp->rx_prepared_head++;
+               desc = macb_rx_desc(bp, entry);
 
                if (!bp->rx_skbuff[entry]) {
                        /* allocate sk_buff for this free entry in ring */
@@ -782,14 +840,14 @@ static void gem_rx_refill(struct macb *bp)
 
                        if (entry == bp->rx_ring_size - 1)
                                paddr |= MACB_BIT(RX_WRAP);
-                       macb_set_addr(&(bp->rx_ring[entry]), paddr);
-                       bp->rx_ring[entry].ctrl = 0;
+                       macb_set_addr(bp, desc, paddr);
+                       desc->ctrl = 0;
 
                        /* properly align Ethernet header */
                        skb_reserve(skb, NET_IP_ALIGN);
                } else {
-                       bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED);
-                       bp->rx_ring[entry].ctrl = 0;
+                       desc->addr &= ~MACB_BIT(RX_USED);
+                       desc->ctrl = 0;
                }
        }
 
@@ -835,16 +893,13 @@ static int gem_rx(struct macb *bp, int budget)
                bool rxused;
 
                entry = macb_rx_ring_wrap(bp, bp->rx_tail);
-               desc = &bp->rx_ring[entry];
+               desc = macb_rx_desc(bp, entry);
 
                /* Make hw descriptor updates visible to CPU */
                rmb();
 
                rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
-               addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-               addr |= ((u64)(desc->addrh) << 32);
-#endif
+               addr = macb_get_addr(bp, desc);
                ctrl = desc->ctrl;
 
                if (!rxused)
@@ -987,15 +1042,17 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
 static inline void macb_init_rx_ring(struct macb *bp)
 {
        dma_addr_t addr;
+       struct macb_dma_desc *desc = NULL;
        int i;
 
        addr = bp->rx_buffers_dma;
        for (i = 0; i < bp->rx_ring_size; i++) {
-               bp->rx_ring[i].addr = addr;
-               bp->rx_ring[i].ctrl = 0;
+               desc = macb_rx_desc(bp, i);
+               macb_set_addr(bp, desc, addr);
+               desc->ctrl = 0;
                addr += bp->rx_buffer_size;
        }
-       bp->rx_ring[bp->rx_ring_size - 1].addr |= MACB_BIT(RX_WRAP);
+       desc->addr |= MACB_BIT(RX_WRAP);
        bp->rx_tail = 0;
 }
 
@@ -1008,15 +1065,14 @@ static int macb_rx(struct macb *bp, int budget)
 
        for (tail = bp->rx_tail; budget > 0; tail++) {
                struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
-               u32 addr, ctrl;
+               u32 ctrl;
 
                /* Make hw descriptor updates visible to CPU */
                rmb();
 
-               addr = desc->addr;
                ctrl = desc->ctrl;
 
-               if (!(addr & MACB_BIT(RX_USED)))
+               if (!(desc->addr & MACB_BIT(RX_USED)))
                        break;
 
                if (ctrl & MACB_BIT(RX_SOF)) {
@@ -1336,7 +1392,7 @@ static unsigned int macb_tx_map(struct macb *bp,
        i = tx_head;
        entry = macb_tx_ring_wrap(bp, i);
        ctrl = MACB_BIT(TX_USED);
-       desc = &queue->tx_ring[entry];
+       desc = macb_tx_desc(queue, entry);
        desc->ctrl = ctrl;
 
        if (lso_ctrl) {
@@ -1358,7 +1414,7 @@ static unsigned int macb_tx_map(struct macb *bp,
                i--;
                entry = macb_tx_ring_wrap(bp, i);
                tx_skb = &queue->tx_skb[entry];
-               desc = &queue->tx_ring[entry];
+               desc = macb_tx_desc(queue, entry);
 
                ctrl = (u32)tx_skb->size;
                if (eof) {
@@ -1379,7 +1435,7 @@ static unsigned int macb_tx_map(struct macb *bp,
                        ctrl |= MACB_BF(MSS_MFS, mss_mfs);
 
                /* Set TX buffer descriptor */
-               macb_set_addr(desc, tx_skb->mapping);
+               macb_set_addr(bp, desc, tx_skb->mapping);
                /* desc->addr must be visible to hardware before clearing
                 * 'TX_USED' bit in desc->ctrl.
                 */
@@ -1586,11 +1642,9 @@ static void gem_free_rx_buffers(struct macb *bp)
                if (!skb)
                        continue;
 
-               desc = &bp->rx_ring[i];
-               addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-               addr |= ((u64)(desc->addrh) << 32);
-#endif
+               desc = macb_rx_desc(bp, i);
+               addr = macb_get_addr(bp, desc);
+
                dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
@@ -1711,15 +1765,17 @@ out_err:
 static void gem_init_rings(struct macb *bp)
 {
        struct macb_queue *queue;
+       struct macb_dma_desc *desc = NULL;
        unsigned int q;
        int i;
 
        for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
                for (i = 0; i < bp->tx_ring_size; i++) {
-                       queue->tx_ring[i].addr = 0;
-                       queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+                       desc = macb_tx_desc(queue, i);
+                       macb_set_addr(bp, desc, 0);
+                       desc->ctrl = MACB_BIT(TX_USED);
                }
-               queue->tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
+               desc->ctrl |= MACB_BIT(TX_WRAP);
                queue->tx_head = 0;
                queue->tx_tail = 0;
        }
@@ -1733,16 +1789,18 @@ static void gem_init_rings(struct macb *bp)
 static void macb_init_rings(struct macb *bp)
 {
        int i;
+       struct macb_dma_desc *desc = NULL;
 
        macb_init_rx_ring(bp);
 
        for (i = 0; i < bp->tx_ring_size; i++) {
-               bp->queues[0].tx_ring[i].addr = 0;
-               bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
+               desc = macb_tx_desc(&bp->queues[0], i);
+               macb_set_addr(bp, desc, 0);
+               desc->ctrl = MACB_BIT(TX_USED);
        }
        bp->queues[0].tx_head = 0;
        bp->queues[0].tx_tail = 0;
-       bp->queues[0].tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
+       desc->ctrl |= MACB_BIT(TX_WRAP);
 }
 
 static void macb_reset_hw(struct macb *bp)
@@ -1863,7 +1921,8 @@ static void macb_configure_dma(struct macb *bp)
                        dmacfg &= ~GEM_BIT(TXCOEN);
 
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-               dmacfg |= GEM_BIT(ADDR64);
+               if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+                       dmacfg |= GEM_BIT(ADDR64);
 #endif
                netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
                           dmacfg);
@@ -1910,14 +1969,16 @@ static void macb_init_hw(struct macb *bp)
        macb_configure_dma(bp);
 
        /* Initialize TX and RX buffers */
-       macb_writel(bp, RBQP, (u32)(bp->rx_ring_dma));
+       macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-       macb_writel(bp, RBQPH, (u32)(bp->rx_ring_dma >> 32));
+       if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+               macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
 #endif
        for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-               queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
+               queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-               queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
+               if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+                       queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
 #endif
 
                /* Enable interrupts */
@@ -2627,7 +2688,8 @@ static int macb_init(struct platform_device *pdev)
                        queue->IMR  = GEM_IMR(hw_q - 1);
                        queue->TBQP = GEM_TBQP(hw_q - 1);
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-                       queue->TBQPH = GEM_TBQPH(hw_q -1);
+                       if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+                               queue->TBQPH = GEM_TBQPH(hw_q - 1);
 #endif
                } else {
                        /* queue0 uses legacy registers */
@@ -2637,7 +2699,8 @@ static int macb_init(struct platform_device *pdev)
                        queue->IMR  = MACB_IMR;
                        queue->TBQP = MACB_TBQP;
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-                       queue->TBQPH = MACB_TBQPH;
+                       if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+                               queue->TBQPH = MACB_TBQPH;
 #endif
                }
 
@@ -2730,13 +2793,14 @@ static int macb_init(struct platform_device *pdev)
 static int at91ether_start(struct net_device *dev)
 {
        struct macb *lp = netdev_priv(dev);
+       struct macb_dma_desc *desc;
        dma_addr_t addr;
        u32 ctl;
        int i;
 
        lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
                                         (AT91ETHER_MAX_RX_DESCR *
-                                         sizeof(struct macb_dma_desc)),
+                                         macb_dma_desc_get_size(lp)),
                                         &lp->rx_ring_dma, GFP_KERNEL);
        if (!lp->rx_ring)
                return -ENOMEM;
@@ -2748,7 +2812,7 @@ static int at91ether_start(struct net_device *dev)
        if (!lp->rx_buffers) {
                dma_free_coherent(&lp->pdev->dev,
                                  AT91ETHER_MAX_RX_DESCR *
-                                 sizeof(struct macb_dma_desc),
+                                 macb_dma_desc_get_size(lp),
                                  lp->rx_ring, lp->rx_ring_dma);
                lp->rx_ring = NULL;
                return -ENOMEM;
@@ -2756,13 +2820,14 @@ static int at91ether_start(struct net_device *dev)
 
        addr = lp->rx_buffers_dma;
        for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
-               lp->rx_ring[i].addr = addr;
-               lp->rx_ring[i].ctrl = 0;
+               desc = macb_rx_desc(lp, i);
+               macb_set_addr(lp, desc, addr);
+               desc->ctrl = 0;
                addr += AT91ETHER_MAX_RBUFF_SZ;
        }
 
        /* Set the Wrap bit on the last descriptor */
-       lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
+       desc->addr |= MACB_BIT(RX_WRAP);
 
        /* Reset buffer index */
        lp->rx_tail = 0;
@@ -2834,7 +2899,7 @@ static int at91ether_close(struct net_device *dev)
 
        dma_free_coherent(&lp->pdev->dev,
                          AT91ETHER_MAX_RX_DESCR *
-                         sizeof(struct macb_dma_desc),
+                         macb_dma_desc_get_size(lp),
                          lp->rx_ring, lp->rx_ring_dma);
        lp->rx_ring = NULL;
 
@@ -2885,13 +2950,15 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static void at91ether_rx(struct net_device *dev)
 {
        struct macb *lp = netdev_priv(dev);
+       struct macb_dma_desc *desc;
        unsigned char *p_recv;
        struct sk_buff *skb;
        unsigned int pktlen;
 
-       while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
+       desc = macb_rx_desc(lp, lp->rx_tail);
+       while (desc->addr & MACB_BIT(RX_USED)) {
                p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
-               pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
+               pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
                skb = netdev_alloc_skb(dev, pktlen + 2);
                if (skb) {
                        skb_reserve(skb, 2);
@@ -2905,17 +2972,19 @@ static void at91ether_rx(struct net_device *dev)
                        lp->stats.rx_dropped++;
                }
 
-               if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
+               if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
                        lp->stats.multicast++;
 
                /* reset ownership bit */
-               lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
+               desc->addr &= ~MACB_BIT(RX_USED);
 
                /* wrap after last buffer */
                if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
                        lp->rx_tail = 0;
                else
                        lp->rx_tail++;
+
+               desc = macb_rx_desc(lp, lp->rx_tail);
        }
 }
 
@@ -3211,8 +3280,11 @@ static int macb_probe(struct platform_device *pdev)
        device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
 
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-       if (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1)) > GEM_DBW32)
+       if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
                dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+               bp->hw_dma_cap = HW_DMA_CAP_64B;
+       } else
+               bp->hw_dma_cap = HW_DMA_CAP_32B;
 #endif
 
        spin_lock_init(&bp->lock);
index d67adad67be1c097a339d993a866597b4d202f4d..fc8550a5d47f75df540521c27f07f5c2d03995f9 100644 (file)
 /* Bitfields in DCFG6. */
 #define GEM_PBUF_LSO_OFFSET                    27
 #define GEM_PBUF_LSO_SIZE                      1
+#define GEM_DAW64_OFFSET                       23
+#define GEM_DAW64_SIZE                         1
 
 /* Constants for CLK */
 #define MACB_CLK_DIV8                          0
 struct macb_dma_desc {
        u32     addr;
        u32     ctrl;
+};
+
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-       u32     addrh;
-       u32     resvd;
-#endif
+enum macb_hw_dma_cap {
+       HW_DMA_CAP_32B,
+       HW_DMA_CAP_64B,
 };
 
+struct macb_dma_desc_64 {
+       u32 addrh;
+       u32 resvd;
+};
+#endif
+
 /* DMA descriptor bitfields */
 #define MACB_RX_USED_OFFSET                    0
 #define MACB_RX_USED_SIZE                      1
@@ -874,6 +884,10 @@ struct macb {
        unsigned int            jumbo_max_len;
 
        u32                     wol;
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+       enum macb_hw_dma_cap hw_dma_cap;
+#endif
 };
 
 static inline bool macb_is_gem(struct macb *bp)
index 2f85b64f01fa06d708e52bde776708f96b55c30e..1e4695270da6cc422c441542783a0ae24dd943a6 100644 (file)
@@ -31,6 +31,7 @@ struct lmac {
        u8                      lmac_type;
        u8                      lane_to_sds;
        bool                    use_training;
+       bool                    autoneg;
        bool                    link_up;
        int                     lmacid; /* ID within BGX */
        int                     lmacid_bd; /* ID on board */
@@ -461,7 +462,17 @@ static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac)
        /* power down, reset autoneg, autoneg enable */
        cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
        cfg &= ~PCS_MRX_CTL_PWR_DN;
-       cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
+       cfg |= PCS_MRX_CTL_RST_AN;
+       if (lmac->phydev) {
+               cfg |= PCS_MRX_CTL_AN_EN;
+       } else {
+               /* In scenarios where PHY driver is not present or it's a
+                * non-standard PHY, FW sets AN_EN to inform Linux driver
+                * to do auto-neg and link polling or not.
+                */
+               if (cfg & PCS_MRX_CTL_AN_EN)
+                       lmac->autoneg = true;
+       }
        bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
 
        if (lmac->lmac_type == BGX_MODE_QSGMII) {
@@ -472,7 +483,7 @@ static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac)
                return 0;
        }
 
-       if (lmac->lmac_type == BGX_MODE_SGMII) {
+       if ((lmac->lmac_type == BGX_MODE_SGMII) && lmac->phydev) {
                if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
                                 PCS_MRX_STATUS_AN_CPT, false)) {
                        dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
@@ -678,12 +689,71 @@ static int bgx_xaui_check_link(struct lmac *lmac)
        return -1;
 }
 
+static void bgx_poll_for_sgmii_link(struct lmac *lmac)
+{
+       u64 pcs_link, an_result;
+       u8 speed;
+
+       pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
+                               BGX_GMP_PCS_MRX_STATUS);
+
+       /*Link state bit is sticky, read it again*/
+       if (!(pcs_link & PCS_MRX_STATUS_LINK))
+               pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
+                                       BGX_GMP_PCS_MRX_STATUS);
+
+       if (bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_GMP_PCS_MRX_STATUS,
+                        PCS_MRX_STATUS_AN_CPT, false)) {
+               lmac->link_up = false;
+               lmac->last_speed = SPEED_UNKNOWN;
+               lmac->last_duplex = DUPLEX_UNKNOWN;
+               goto next_poll;
+       }
+
+       lmac->link_up = ((pcs_link & PCS_MRX_STATUS_LINK) != 0) ? true : false;
+       an_result = bgx_reg_read(lmac->bgx, lmac->lmacid,
+                                BGX_GMP_PCS_ANX_AN_RESULTS);
+
+       speed = (an_result >> 3) & 0x3;
+       lmac->last_duplex = (an_result >> 1) & 0x1;
+       switch (speed) {
+       case 0:
+               lmac->last_speed = 10;
+               break;
+       case 1:
+               lmac->last_speed = 100;
+               break;
+       case 2:
+               lmac->last_speed = 1000;
+               break;
+       default:
+               lmac->link_up = false;
+               lmac->last_speed = SPEED_UNKNOWN;
+               lmac->last_duplex = DUPLEX_UNKNOWN;
+               break;
+       }
+
+next_poll:
+
+       if (lmac->last_link != lmac->link_up) {
+               if (lmac->link_up)
+                       bgx_sgmii_change_link_state(lmac);
+               lmac->last_link = lmac->link_up;
+       }
+
+       queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 3);
+}
+
 static void bgx_poll_for_link(struct work_struct *work)
 {
        struct lmac *lmac;
        u64 spu_link, smu_link;
 
        lmac = container_of(work, struct lmac, dwork.work);
+       if (lmac->is_sgmii) {
+               bgx_poll_for_sgmii_link(lmac);
+               return;
+       }
 
        /* Receive link is latching low. Force it high and verify it */
        bgx_reg_modify(lmac->bgx, lmac->lmacid,
@@ -775,9 +845,21 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
            (lmac->lmac_type != BGX_MODE_XLAUI) &&
            (lmac->lmac_type != BGX_MODE_40G_KR) &&
            (lmac->lmac_type != BGX_MODE_10G_KR)) {
-               if (!lmac->phydev)
-                       return -ENODEV;
-
+               if (!lmac->phydev) {
+                       if (lmac->autoneg) {
+                               bgx_reg_write(bgx, lmacid,
+                                             BGX_GMP_PCS_LINKX_TIMER,
+                                             PCS_LINKX_TIMER_COUNT);
+                               goto poll;
+                       } else {
+                               /* Default to below link speed and duplex */
+                               lmac->link_up = true;
+                               lmac->last_speed = 1000;
+                               lmac->last_duplex = 1;
+                               bgx_sgmii_change_link_state(lmac);
+                               return 0;
+                       }
+               }
                lmac->phydev->dev_flags = 0;
 
                if (phy_connect_direct(&lmac->netdev, lmac->phydev,
@@ -786,15 +868,17 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
                        return -ENODEV;
 
                phy_start_aneg(lmac->phydev);
-       } else {
-               lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
-                                                  WQ_MEM_RECLAIM, 1);
-               if (!lmac->check_link)
-                       return -ENOMEM;
-               INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
-               queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
+               return 0;
        }
 
+poll:
+       lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
+                                          WQ_MEM_RECLAIM, 1);
+       if (!lmac->check_link)
+               return -ENOMEM;
+       INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
+       queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
+
        return 0;
 }
 
index c18ebfeb203919ea9b16e406bb678bd05a98a656..a60f189429bb658cb5ab8383982f86ddd9090fc3 100644 (file)
 #define         PCS_MRX_CTL_LOOPBACK1                  BIT_ULL(14)
 #define         PCS_MRX_CTL_RESET                      BIT_ULL(15)
 #define BGX_GMP_PCS_MRX_STATUS         0x30008
+#define         PCS_MRX_STATUS_LINK                    BIT_ULL(2)
 #define         PCS_MRX_STATUS_AN_CPT                  BIT_ULL(5)
+#define BGX_GMP_PCS_ANX_ADV            0x30010
 #define BGX_GMP_PCS_ANX_AN_RESULTS     0x30020
+#define BGX_GMP_PCS_LINKX_TIMER                0x30040
+#define PCS_LINKX_TIMER_COUNT                  0x1E84
 #define BGX_GMP_PCS_SGM_AN_ADV         0x30068
 #define BGX_GMP_PCS_MISCX_CTL          0x30078
+#define  PCS_MISC_CTL_MODE                     BIT_ULL(8)
 #define  PCS_MISC_CTL_DISP_EN                  BIT_ULL(13)
 #define  PCS_MISC_CTL_GMX_ENO                  BIT_ULL(11)
 #define  PCS_MISC_CTL_SAMP_PT_MASK     0x7Full
index 67befedef7098ddbde763738eb7940116efa283e..578c7f8f11bf23add2ac4d3c2263e371b4509136 100644 (file)
@@ -116,8 +116,7 @@ void xcv_setup_link(bool link_up, int link_speed)
        int speed = 2;
 
        if (!xcv) {
-               dev_err(&xcv->pdev->dev,
-                       "XCV init not done, probe may have failed\n");
+               pr_err("XCV init not done, probe may have failed\n");
                return;
        }
 
index 1a7f8ad7b9c6111ea2f8839a5d28c82af1ef13a8..cd49a54c538d5202f1bb0cb632b8fdb306a66989 100644 (file)
@@ -362,8 +362,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
                status = -EPERM;
                goto err;
        }
-done:
+
+       /* Remember currently programmed MAC */
        ether_addr_copy(adapter->dev_mac, addr->sa_data);
+done:
        ether_addr_copy(netdev->dev_addr, addr->sa_data);
        dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
        return 0;
@@ -3618,8 +3620,10 @@ static void be_disable_if_filters(struct be_adapter *adapter)
 {
        /* Don't delete MAC on BE3 VFs without FILTMGMT privilege  */
        if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
-           check_privilege(adapter, BE_PRIV_FILTMGMT))
+           check_privilege(adapter, BE_PRIV_FILTMGMT)) {
                be_dev_mac_del(adapter, adapter->pmac_id[0]);
+               eth_zero_addr(adapter->dev_mac);
+       }
 
        be_clear_uc_list(adapter);
        be_clear_mc_list(adapter);
@@ -3773,12 +3777,27 @@ static int be_enable_if_filters(struct be_adapter *adapter)
        if (status)
                return status;
 
-       /* Don't add MAC on BE3 VFs without FILTMGMT privilege */
-       if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
-           check_privilege(adapter, BE_PRIV_FILTMGMT)) {
+       /* Normally this condition usually true as the ->dev_mac is zeroed.
+        * But on BE3 VFs the initial MAC is pre-programmed by PF and
+        * subsequent be_dev_mac_add() can fail (after fresh boot)
+        */
+       if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
+               int old_pmac_id = -1;
+
+               /* Remember old programmed MAC if any - can happen on BE3 VF */
+               if (!is_zero_ether_addr(adapter->dev_mac))
+                       old_pmac_id = adapter->pmac_id[0];
+
                status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
                if (status)
                        return status;
+
+               /* Delete the old programmed MAC as we successfully programmed
+                * a new MAC
+                */
+               if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
+                       be_dev_mac_del(adapter, old_pmac_id);
+
                ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
        }
 
@@ -4552,6 +4571,10 @@ static int be_mac_setup(struct be_adapter *adapter)
 
                memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
                memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+
+               /* Initial MAC for BE3 VFs is already programmed by PF */
+               if (BEx_chip(adapter) && be_virtfn(adapter))
+                       memcpy(adapter->dev_mac, mac, ETH_ALEN);
        }
 
        return 0;
index c9b7ad65e5633bc2a5147706f5c728f4d1cffa3c..726b5693ae8a5a3a9364e5de13195d36c5b1d06f 100644 (file)
@@ -1668,7 +1668,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
 
 free_buffers:
        /* compensate sw bpool counter changes */
-       for (i--; i > 0; i--) {
+       for (i--; i >= 0; i--) {
                dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
                if (dpaa_bp) {
                        count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
index 38160c2bebcb33a0214c5add50a2c71fc439ef5f..8be7034b2e7ba4168ab962b951419a498ca9cc7d 100644 (file)
@@ -2910,6 +2910,7 @@ static void set_multicast_list(struct net_device *ndev)
        struct netdev_hw_addr *ha;
        unsigned int i, bit, data, crc, tmp;
        unsigned char hash;
+       unsigned int hash_high = 0, hash_low = 0;
 
        if (ndev->flags & IFF_PROMISC) {
                tmp = readl(fep->hwp + FEC_R_CNTRL);
@@ -2932,11 +2933,7 @@ static void set_multicast_list(struct net_device *ndev)
                return;
        }
 
-       /* Clear filter and add the addresses in hash register
-        */
-       writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
-       writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
-
+       /* Add the addresses in hash register */
        netdev_for_each_mc_addr(ha, ndev) {
                /* calculate crc32 value of mac address */
                crc = 0xffffffff;
@@ -2954,16 +2951,14 @@ static void set_multicast_list(struct net_device *ndev)
                 */
                hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
 
-               if (hash > 31) {
-                       tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
-                       tmp |= 1 << (hash - 32);
-                       writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
-               } else {
-                       tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
-                       tmp |= 1 << hash;
-                       writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
-               }
+               if (hash > 31)
+                       hash_high |= 1 << (hash - 32);
+               else
+                       hash_low |= 1 << hash;
        }
+
+       writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+       writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
 }
 
 /* Set a MAC change in hardware. */
index a6e7afa878befd1abe00404b2cd4becfa174d103..957bfc220978479a5ccee32b58ae26d4236fe939 100644 (file)
@@ -2010,8 +2010,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
                if (!rxb->page)
                        continue;
 
-               dma_unmap_single(rx_queue->dev, rxb->dma,
-                                PAGE_SIZE, DMA_FROM_DEVICE);
+               dma_unmap_page(rx_queue->dev, rxb->dma,
+                              PAGE_SIZE, DMA_FROM_DEVICE);
                __free_page(rxb->page);
 
                rxb->page = NULL;
@@ -2948,7 +2948,7 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
        }
 
        /* try reuse page */
-       if (unlikely(page_count(page) != 1))
+       if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
                return false;
 
        /* change offset to the other half */
index 87226685f74215a2093e59a99bd8042a2e2585c5..8fa18fc17cd2e25f2e3458e608abe6f5a96b60d9 100644 (file)
 
 static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
 {
-       u8 __iomem *reg_addr = ACCESS_ONCE(base);
-
-       writel(value, reg_addr + reg);
+       writel(value, base + reg);
 }
 
 #define dsaf_write_dev(a, reg, value) \
@@ -1024,9 +1022,7 @@ static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
 
 static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg)
 {
-       u8 __iomem *reg_addr = ACCESS_ONCE(base);
-
-       return readl(reg_addr + reg);
+       return readl(base + reg);
 }
 
 static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 value)
index 672b64606321c3a1e2ab8eb9be8666ad04713d5f..8aed72860e7c0eece690c97ae38b1fbedfa58557 100644 (file)
@@ -305,8 +305,8 @@ int hns_nic_net_xmit_hw(struct net_device *ndev,
                        struct hns_nic_ring_data *ring_data)
 {
        struct hns_nic_priv *priv = netdev_priv(ndev);
-       struct device *dev = priv->dev;
        struct hnae_ring *ring = ring_data->ring;
+       struct device *dev = ring_to_dev(ring);
        struct netdev_queue *dev_queue;
        struct skb_frag_struct *frag;
        int buf_num;
index a831f947ca8c1157737a18b69c611105442f9276..309f5c66083cf504e8beaeff9f4575e40c646063 100644 (file)
@@ -1601,8 +1601,11 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
        netdev->netdev_ops = &ibmveth_netdev_ops;
        netdev->ethtool_ops = &netdev_ethtool_ops;
        SET_NETDEV_DEV(netdev, &dev->dev);
-       netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
-               NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+       netdev->hw_features = NETIF_F_SG;
+       if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) {
+               netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+                                      NETIF_F_RXCSUM;
+       }
 
        netdev->features |= netdev->hw_features;
 
index c12596676bbbba5ef426b1e8463e7080bdd2539b..a07b8d79174cd0aa4f338df7214ea4e9601076dc 100644 (file)
@@ -189,9 +189,10 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
        }
        ltb->map_id = adapter->map_id;
        adapter->map_id++;
+
+       init_completion(&adapter->fw_done);
        send_request_map(adapter, ltb->addr,
                         ltb->size, ltb->map_id);
-       init_completion(&adapter->fw_done);
        wait_for_completion(&adapter->fw_done);
        return 0;
 }
@@ -505,7 +506,7 @@ rx_pool_alloc_failed:
        adapter->rx_pool = NULL;
 rx_pool_arr_alloc_failed:
        for (i = 0; i < adapter->req_rx_queues; i++)
-               napi_enable(&adapter->napi[i]);
+               napi_disable(&adapter->napi[i]);
 alloc_napi_failed:
        return -ENOMEM;
 }
@@ -1121,10 +1122,10 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
        crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
        crq.request_statistics.len =
            cpu_to_be32(sizeof(struct ibmvnic_statistics));
-       ibmvnic_send_crq(adapter, &crq);
 
        /* Wait for data to be written */
        init_completion(&adapter->stats_done);
+       ibmvnic_send_crq(adapter, &crq);
        wait_for_completion(&adapter->stats_done);
 
        for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
@@ -1496,7 +1497,7 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
                adapter->req_rx_queues = adapter->opt_rx_comp_queues;
                adapter->req_rx_add_queues = adapter->max_rx_add_queues;
 
-               adapter->req_mtu = adapter->max_mtu;
+               adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
        }
 
        total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
@@ -2185,12 +2186,12 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq,
 
        if (!found) {
                dev_err(dev, "Couldn't find error id %x\n",
-                       crq->request_error_rsp.error_id);
+                       be32_to_cpu(crq->request_error_rsp.error_id));
                return;
        }
 
        dev_err(dev, "Detailed info for error id %x:",
-               crq->request_error_rsp.error_id);
+               be32_to_cpu(crq->request_error_rsp.error_id));
 
        for (i = 0; i < error_buff->len; i++) {
                pr_cont("%02x", (int)error_buff->buff[i]);
@@ -2269,8 +2270,8 @@ static void handle_error_indication(union ibmvnic_crq *crq,
        dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
                crq->error_indication.
                    flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
-               crq->error_indication.error_id,
-               crq->error_indication.error_cause);
+               be32_to_cpu(crq->error_indication.error_id),
+               be16_to_cpu(crq->error_indication.error_cause));
 
        error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
        if (!error_buff)
@@ -2388,10 +2389,10 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
        case PARTIALSUCCESS:
                dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
                         *req_value,
-                        (long int)be32_to_cpu(crq->request_capability_rsp.
+                        (long int)be64_to_cpu(crq->request_capability_rsp.
                                               number), name);
                release_sub_crqs_no_irqs(adapter);
-               *req_value = be32_to_cpu(crq->request_capability_rsp.number);
+               *req_value = be64_to_cpu(crq->request_capability_rsp.number);
                init_sub_crqs(adapter, 1);
                return;
        default:
@@ -2626,12 +2627,12 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
                break;
        case MIN_MTU:
                adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
-               netdev->min_mtu = adapter->min_mtu;
+               netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
                netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
                break;
        case MAX_MTU:
                adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
-               netdev->max_mtu = adapter->max_mtu;
+               netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
                netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
                break;
        case MAX_MULTICAST_FILTERS:
@@ -2799,9 +2800,9 @@ static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
        crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
        crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
        crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
-       ibmvnic_send_crq(adapter, &crq);
 
        init_completion(&adapter->fw_done);
+       ibmvnic_send_crq(adapter, &crq);
        wait_for_completion(&adapter->fw_done);
 
        if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
@@ -3581,9 +3582,9 @@ static int ibmvnic_dump_show(struct seq_file *seq, void *v)
        memset(&crq, 0, sizeof(crq));
        crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
        crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;
-       ibmvnic_send_crq(adapter, &crq);
 
        init_completion(&adapter->fw_done);
+       ibmvnic_send_crq(adapter, &crq);
        wait_for_completion(&adapter->fw_done);
 
        seq_write(seq, adapter->dump_data, adapter->dump_data_size);
@@ -3629,8 +3630,8 @@ static void handle_crq_init_rsp(struct work_struct *work)
                }
        }
 
-       send_version_xchg(adapter);
        reinit_completion(&adapter->init_done);
+       send_version_xchg(adapter);
        if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
                dev_err(dev, "Passive init timeout\n");
                goto task_failed;
@@ -3640,9 +3641,9 @@ static void handle_crq_init_rsp(struct work_struct *work)
                if (adapter->renegotiate) {
                        adapter->renegotiate = false;
                        release_sub_crqs_no_irqs(adapter);
-                       send_cap_queries(adapter);
 
                        reinit_completion(&adapter->init_done);
+                       send_cap_queries(adapter);
                        if (!wait_for_completion_timeout(&adapter->init_done,
                                                         timeout)) {
                                dev_err(dev, "Passive init timeout\n");
@@ -3656,9 +3657,7 @@ static void handle_crq_init_rsp(struct work_struct *work)
                goto task_failed;
 
        netdev->real_num_tx_queues = adapter->req_tx_queues;
-       netdev->mtu = adapter->req_mtu;
-       netdev->min_mtu = adapter->min_mtu;
-       netdev->max_mtu = adapter->max_mtu;
+       netdev->mtu = adapter->req_mtu - ETH_HLEN;
 
        if (adapter->failover) {
                adapter->failover = false;
@@ -3772,9 +3771,9 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
                        adapter->debugfs_dump = ent;
                }
        }
-       ibmvnic_send_crq_init(adapter);
 
        init_completion(&adapter->init_done);
+       ibmvnic_send_crq_init(adapter);
        if (!wait_for_completion_timeout(&adapter->init_done, timeout))
                return 0;
 
@@ -3782,9 +3781,9 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
                if (adapter->renegotiate) {
                        adapter->renegotiate = false;
                        release_sub_crqs_no_irqs(adapter);
-                       send_cap_queries(adapter);
 
                        reinit_completion(&adapter->init_done);
+                       send_cap_queries(adapter);
                        if (!wait_for_completion_timeout(&adapter->init_done,
                                                         timeout))
                                return 0;
@@ -3798,7 +3797,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
        }
 
        netdev->real_num_tx_queues = adapter->req_tx_queues;
-       netdev->mtu = adapter->req_mtu;
+       netdev->mtu = adapter->req_mtu - ETH_HLEN;
 
        rc = register_netdev(netdev);
        if (rc) {
index 3dd87889e67e1a69189035631c364b95d85dabf1..1c29c86f8709f16bc75087d023858185b37c2faf 100644 (file)
@@ -2517,7 +2517,7 @@ static int mtk_remove(struct platform_device *pdev)
 }
 
 const struct of_device_id of_mtk_match[] = {
-       { .compatible = "mediatek,mt7623-eth" },
+       { .compatible = "mediatek,mt2701-eth" },
        {},
 };
 MODULE_DEVICE_TABLE(of, of_mtk_match);
index c7e939945259dc876b66cfedd0d85f9d7e90a914..53daa6ca5d83b60f7ad8632694658922921f82f5 100644 (file)
@@ -158,7 +158,7 @@ static int mlx4_reset_slave(struct mlx4_dev *dev)
        return -ETIMEDOUT;
 }
 
-static int mlx4_comm_internal_err(u32 slave_read)
+int mlx4_comm_internal_err(u32 slave_read)
 {
        return (u32)COMM_CHAN_EVENT_INTERNAL_ERR ==
                (slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0;
index d9c9f86a30df953fa555934c5406057dcaf28960..9aa4226919542f6496fedce45a09e09685433efe 100644 (file)
@@ -1099,7 +1099,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
        memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
        new_prof.tx_ring_size = tx_size;
        new_prof.rx_ring_size = rx_size;
-       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
        if (err)
                goto out;
 
@@ -1732,8 +1732,6 @@ static void mlx4_en_get_channels(struct net_device *dev,
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
 
-       memset(channel, 0, sizeof(*channel));
-
        channel->max_rx = MAX_RX_RINGS;
        channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;
 
@@ -1752,10 +1750,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
        int xdp_count;
        int err = 0;
 
-       if (channel->other_count || channel->combined_count ||
-           channel->tx_count > MLX4_EN_MAX_TX_RING_P_UP ||
-           channel->rx_count > MAX_RX_RINGS ||
-           !channel->tx_count || !channel->rx_count)
+       if (!channel->tx_count || !channel->rx_count)
                return -EINVAL;
 
        tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
@@ -1779,7 +1774,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
        new_prof.tx_ring_num[TX_XDP] = xdp_count;
        new_prof.rx_ring_num = channel->rx_count;
 
-       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
        if (err)
                goto out;
 
index 761f8b12399cab245abccc0f7d7f84fde742c14d..3b4961a8e8e44d6987ebd23f9239e747c7fc6cd5 100644 (file)
@@ -2042,6 +2042,8 @@ static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
                        if (priv->tx_cq[t] && priv->tx_cq[t][i])
                                mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
                }
+               kfree(priv->tx_ring[t]);
+               kfree(priv->tx_cq[t]);
        }
 
        for (i = 0; i < priv->rx_ring_num; i++) {
@@ -2184,9 +2186,11 @@ static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
 
 int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
                                struct mlx4_en_priv *tmp,
-                               struct mlx4_en_port_profile *prof)
+                               struct mlx4_en_port_profile *prof,
+                               bool carry_xdp_prog)
 {
-       int t;
+       struct bpf_prog *xdp_prog;
+       int i, t;
 
        mlx4_en_copy_priv(tmp, priv, prof);
 
@@ -2200,6 +2204,23 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
                }
                return -ENOMEM;
        }
+
+       /* All rx_rings has the same xdp_prog.  Pick the first one. */
+       xdp_prog = rcu_dereference_protected(
+               priv->rx_ring[0]->xdp_prog,
+               lockdep_is_held(&priv->mdev->state_lock));
+
+       if (xdp_prog && carry_xdp_prog) {
+               xdp_prog = bpf_prog_add(xdp_prog, tmp->rx_ring_num);
+               if (IS_ERR(xdp_prog)) {
+                       mlx4_en_free_resources(tmp);
+                       return PTR_ERR(xdp_prog);
+               }
+               for (i = 0; i < tmp->rx_ring_num; i++)
+                       rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog,
+                                          xdp_prog);
+       }
+
        return 0;
 }
 
@@ -2214,7 +2235,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
-       int t;
 
        en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
 
@@ -2248,11 +2268,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
        mlx4_en_free_resources(priv);
        mutex_unlock(&mdev->state_lock);
 
-       for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
-               kfree(priv->tx_ring[t]);
-               kfree(priv->tx_cq[t]);
-       }
-
        free_netdev(dev);
 }
 
@@ -2755,7 +2770,7 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
                en_warn(priv, "Reducing the number of TX rings, to not exceed the max total rings number.\n");
        }
 
-       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, false);
        if (err) {
                if (prog)
                        bpf_prog_sub(prog, priv->rx_ring_num - 1);
@@ -3499,7 +3514,7 @@ int mlx4_en_reset_config(struct net_device *dev,
        memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
        memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
 
-       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
        if (err)
                goto out;
 
index eac527e25ec902c2a586e9952272b9e8e599e2c8..cc003fdf0ed929a981b1403f6a7d0099825fec4b 100644 (file)
@@ -514,8 +514,11 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
                return;
 
        for (ring = 0; ring < priv->rx_ring_num; ring++) {
-               if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
+               if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) {
+                       local_bh_disable();
                        napi_reschedule(&priv->rx_cq[ring]->napi);
+                       local_bh_enable();
+               }
        }
 }
 
index 0e8b7c44931f907ed881d093077e93b92ae0305d..8258d08acd8c2029a8bcb812dd5efd85d8c7b0f2 100644 (file)
@@ -222,6 +222,18 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
                return;
 
        mlx4_stop_catas_poll(dev);
+       if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION &&
+           mlx4_is_slave(dev)) {
+               /* In mlx4_remove_one on a VF */
+               u32 slave_read =
+                       swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read));
+
+               if (mlx4_comm_internal_err(slave_read)) {
+                       mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n",
+                                __func__);
+                       mlx4_enter_error_state(dev->persist);
+               }
+       }
        mutex_lock(&intf_mutex);
 
        list_for_each_entry(intf, &intf_list, list)
index 88ee7d8a59231a47d6b7aca2006f9780dbefa578..086920b615af7180e891893ffd00928c0bd0238f 100644 (file)
@@ -1220,6 +1220,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
 void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
 
 void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
+int mlx4_comm_internal_err(u32 slave_read);
 
 int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
                    enum mlx4_port_type *type);
index ba1c6cd0cc79590075f4420a930b613c9fdedc62..cec59bc264c9ac197048fd7c98bcd5cf25de0efd 100644 (file)
@@ -679,7 +679,8 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
 
 int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
                                struct mlx4_en_priv *tmp,
-                               struct mlx4_en_port_profile *prof);
+                               struct mlx4_en_port_profile *prof,
+                               bool carry_xdp_prog);
 void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
                                    struct mlx4_en_priv *tmp);
 
index 3797cc7c1288078298ec655921f9fc9f804df97e..caa837e5e2b991fc3666776d2050fe20b1c6c7f6 100644 (file)
@@ -1728,7 +1728,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
        if (cmd->cmdif_rev > CMD_IF_REV) {
                dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
                        CMD_IF_REV, cmd->cmdif_rev);
-               err = -ENOTSUPP;
+               err = -EOPNOTSUPP;
                goto err_free_page;
        }
 
index 951dbd58594dcd3b32b680f752c1105132d85ba8..d5ecb8f53fd43684f185d590c8dc5553a4f25ab4 100644 (file)
@@ -791,7 +791,8 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
 int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
 
 int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+                                   enum mlx5e_traffic_types tt);
 
 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);
@@ -863,12 +864,12 @@ static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
 
 static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
 {
-       return -ENOTSUPP;
+       return -EOPNOTSUPP;
 }
 
 static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
 {
-       return -ENOTSUPP;
+       return -EOPNOTSUPP;
 }
 #else
 int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
index f0b460f47f2992caad4eec7ea0d655296a46e99c..0523ed47f597c715296c5ea843245625bf3dac62 100644 (file)
@@ -89,7 +89,7 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
        int i;
 
        if (!MLX5_CAP_GEN(priv->mdev, ets))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
        for (i = 0; i < ets->ets_cap; i++) {
@@ -236,7 +236,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
        int err;
 
        if (!MLX5_CAP_GEN(priv->mdev, ets))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        err = mlx5e_dbcnl_validate_ets(netdev, ets);
        if (err)
@@ -402,7 +402,7 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
        struct mlx5_core_dev *mdev = priv->mdev;
        struct ieee_ets ets;
        struct ieee_pfc pfc;
-       int err = -ENOTSUPP;
+       int err = -EOPNOTSUPP;
        int i;
 
        if (!MLX5_CAP_GEN(mdev, ets))
@@ -511,6 +511,11 @@ static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
 
+       if (!MLX5_CAP_GEN(priv->mdev, ets)) {
+               netdev_err(netdev, "%s, ets is not supported\n", __func__);
+               return;
+       }
+
        if (priority >= CEE_DCBX_MAX_PRIO) {
                netdev_err(netdev,
                           "%s, priority is out of range\n", __func__);
index 33a399a8b5d52297379ade73f37e61df266905fa..bb67863aa361168a8566349ef356d9a991d411be 100644 (file)
@@ -543,7 +543,6 @@ static int mlx5e_set_channels(struct net_device *dev,
                              struct ethtool_channels *ch)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
-       int ncv = mlx5e_get_max_num_channels(priv->mdev);
        unsigned int count = ch->combined_count;
        bool arfs_enabled;
        bool was_opened;
@@ -554,16 +553,6 @@ static int mlx5e_set_channels(struct net_device *dev,
                            __func__);
                return -EINVAL;
        }
-       if (ch->rx_count || ch->tx_count) {
-               netdev_info(dev, "%s: separate rx/tx count not supported\n",
-                           __func__);
-               return -EINVAL;
-       }
-       if (count > ncv) {
-               netdev_info(dev, "%s: count (%d) > max (%d)\n",
-                           __func__, count, ncv);
-               return -EINVAL;
-       }
 
        if (priv->params.num_channels == count)
                return 0;
@@ -606,7 +595,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
        struct mlx5e_priv *priv = netdev_priv(netdev);
 
        if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        coal->rx_coalesce_usecs       = priv->params.rx_cq_moderation.usec;
        coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts;
@@ -631,7 +620,7 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
        int i;
 
        if (!MLX5_CAP_GEN(mdev, cq_moderation))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        mutex_lock(&priv->state_lock);
 
@@ -991,15 +980,18 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
 
 static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
 {
-       struct mlx5_core_dev *mdev = priv->mdev;
        void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
-       int i;
+       struct mlx5_core_dev *mdev = priv->mdev;
+       int ctxlen = MLX5_ST_SZ_BYTES(tirc);
+       int tt;
 
        MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
-       mlx5e_build_tir_ctx_hash(tirc, priv);
 
-       for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
-               mlx5_core_modify_tir(mdev, priv->indir_tir[i].tirn, in, inlen);
+       for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+               memset(tirc, 0, ctxlen);
+               mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
+               mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
+       }
 }
 
 static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
@@ -1007,6 +999,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
        int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+       bool hash_changed = false;
        void *in;
 
        if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
@@ -1028,14 +1021,21 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
                mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
        }
 
-       if (key)
+       if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
+           hfunc != priv->params.rss_hfunc) {
+               priv->params.rss_hfunc = hfunc;
+               hash_changed = true;
+       }
+
+       if (key) {
                memcpy(priv->params.toeplitz_hash_key, key,
                       sizeof(priv->params.toeplitz_hash_key));
+               hash_changed = hash_changed ||
+                              priv->params.rss_hfunc == ETH_RSS_HASH_TOP;
+       }
 
-       if (hfunc != ETH_RSS_HASH_NO_CHANGE)
-               priv->params.rss_hfunc = hfunc;
-
-       mlx5e_modify_tirs_hash(priv, in, inlen);
+       if (hash_changed)
+               mlx5e_modify_tirs_hash(priv, in, inlen);
 
        mutex_unlock(&priv->state_lock);
 
@@ -1307,7 +1307,7 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
        u32 mlx5_wol_mode;
 
        if (!wol_supported)
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        if (wol->wolopts & ~wol_supported)
                return -EINVAL;
@@ -1437,7 +1437,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
 
        if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
            !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        if (!rx_mode_changed)
                return 0;
@@ -1463,7 +1463,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
        bool reset;
 
        if (!MLX5_CAP_GEN(mdev, cqe_compression))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        if (enable && priv->tstamp.hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
                netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n");
index 1fe80de5d68f1f3cf09c6e32530cd32114580051..a0e5a69402b30a349b196eaa72ce1a413b5479b2 100644 (file)
@@ -1089,7 +1089,7 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
                                               MLX5_FLOW_NAMESPACE_KERNEL);
 
        if (!priv->fs.ns)
-               return -EINVAL;
+               return -EOPNOTSUPP;
 
        err = mlx5e_arfs_create_tables(priv);
        if (err) {
index d088effd7160355849faacead1326f2198d12e8d..f33f72d0237c1bafc702f4066dab31ab22963a47 100644 (file)
@@ -92,7 +92,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
        ns = mlx5_get_flow_namespace(priv->mdev,
                                     MLX5_FLOW_NAMESPACE_ETHTOOL);
        if (!ns)
-               return ERR_PTR(-ENOTSUPP);
+               return ERR_PTR(-EOPNOTSUPP);
 
        table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
                                                       flow_table_properties_nic_receive.log_max_ft_size)),
index 2b7dd315020cd9e1a21b28643621122695cd06dd..f14ca3385fdd683b12f434e289cc8e264040c1ed 100644 (file)
@@ -2022,8 +2022,23 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
        MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
 }
 
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+                                   enum mlx5e_traffic_types tt)
 {
+       void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+                                MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+                                MLX5_HASH_FIELD_SEL_DST_IP   |\
+                                MLX5_HASH_FIELD_SEL_L4_SPORT |\
+                                MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+                                MLX5_HASH_FIELD_SEL_DST_IP   |\
+                                MLX5_HASH_FIELD_SEL_IPSEC_SPI)
+
        MLX5_SET(tirc, tirc, rx_hash_fn,
                 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
        if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
@@ -2035,6 +2050,88 @@ void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
                MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
                memcpy(rss_key, priv->params.toeplitz_hash_key, len);
        }
+
+       switch (tt) {
+       case MLX5E_TT_IPV4_TCP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+                        MLX5_L4_PROT_TYPE_TCP);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_L4PORTS);
+               break;
+
+       case MLX5E_TT_IPV6_TCP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+                        MLX5_L4_PROT_TYPE_TCP);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_L4PORTS);
+               break;
+
+       case MLX5E_TT_IPV4_UDP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+                        MLX5_L4_PROT_TYPE_UDP);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_L4PORTS);
+               break;
+
+       case MLX5E_TT_IPV6_UDP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+                        MLX5_L4_PROT_TYPE_UDP);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_L4PORTS);
+               break;
+
+       case MLX5E_TT_IPV4_IPSEC_AH:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_IPSEC_SPI);
+               break;
+
+       case MLX5E_TT_IPV6_IPSEC_AH:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_IPSEC_SPI);
+               break;
+
+       case MLX5E_TT_IPV4_IPSEC_ESP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_IPSEC_SPI);
+               break;
+
+       case MLX5E_TT_IPV6_IPSEC_ESP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_IPSEC_SPI);
+               break;
+
+       case MLX5E_TT_IPV4:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP);
+               break;
+
+       case MLX5E_TT_IPV6:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP);
+               break;
+       default:
+               WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
+       }
 }
 
 static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
@@ -2404,110 +2501,13 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
 static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
                                      enum mlx5e_traffic_types tt)
 {
-       void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
-
        MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
 
-#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-                                MLX5_HASH_FIELD_SEL_DST_IP)
-
-#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-                                MLX5_HASH_FIELD_SEL_DST_IP   |\
-                                MLX5_HASH_FIELD_SEL_L4_SPORT |\
-                                MLX5_HASH_FIELD_SEL_L4_DPORT)
-
-#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-                                MLX5_HASH_FIELD_SEL_DST_IP   |\
-                                MLX5_HASH_FIELD_SEL_IPSEC_SPI)
-
        mlx5e_build_tir_ctx_lro(tirc, priv);
 
        MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
        MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
-       mlx5e_build_tir_ctx_hash(tirc, priv);
-
-       switch (tt) {
-       case MLX5E_TT_IPV4_TCP:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV4);
-               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-                        MLX5_L4_PROT_TYPE_TCP);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_L4PORTS);
-               break;
-
-       case MLX5E_TT_IPV6_TCP:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV6);
-               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-                        MLX5_L4_PROT_TYPE_TCP);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_L4PORTS);
-               break;
-
-       case MLX5E_TT_IPV4_UDP:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV4);
-               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-                        MLX5_L4_PROT_TYPE_UDP);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_L4PORTS);
-               break;
-
-       case MLX5E_TT_IPV6_UDP:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV6);
-               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-                        MLX5_L4_PROT_TYPE_UDP);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_L4PORTS);
-               break;
-
-       case MLX5E_TT_IPV4_IPSEC_AH:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV4);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_IPSEC_SPI);
-               break;
-
-       case MLX5E_TT_IPV6_IPSEC_AH:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV6);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_IPSEC_SPI);
-               break;
-
-       case MLX5E_TT_IPV4_IPSEC_ESP:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV4);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_IPSEC_SPI);
-               break;
-
-       case MLX5E_TT_IPV6_IPSEC_ESP:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV6);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_IPSEC_SPI);
-               break;
-
-       case MLX5E_TT_IPV4:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV4);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP);
-               break;
-
-       case MLX5E_TT_IPV6:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV6);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP);
-               break;
-       default:
-               WARN_ONCE(true,
-                         "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
-       }
+       mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
 }
 
 static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
@@ -3331,7 +3331,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
 static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
 {
        if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
        if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
            !MLX5_CAP_GEN(mdev, nic_flow_table) ||
            !MLX5_CAP_ETH(mdev, csum_cap) ||
@@ -3343,7 +3343,7 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
                               < 3) {
                mlx5_core_warn(mdev,
                               "Not creating net device, some required device capabilities are missing\n");
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
        }
        if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
                mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
index 0e2fb3ed1790900ccdc0bbbef8bc5c33267df092..06d5e6fecb0a5ecf255c9d6319ffb285e1a660df 100644 (file)
@@ -193,6 +193,9 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
                return false;
        }
 
+       if (unlikely(page_is_pfmemalloc(dma_info->page)))
+               return false;
+
        cache->page_cache[cache->tail] = *dma_info;
        cache->tail = tail_next;
        return true;
index 46bef6a26a8cdbebf268b6275271367c4109a77d..2ebbe80d8126521cd324090344ecc3865a16657b 100644 (file)
@@ -663,6 +663,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
                                   __be32 *saddr,
                                   int *out_ttl)
 {
+       struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct rtable *rt;
        struct neighbour *n = NULL;
        int ttl;
@@ -677,12 +678,11 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 #else
        return -EOPNOTSUPP;
 #endif
-
-       if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
-               pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__);
-               ip_rt_put(rt);
-               return -EOPNOTSUPP;
-       }
+       /* if the egress device isn't on the same HW e-switch, we use the uplink */
+       if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
+               *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+       else
+               *out_dev = rt->dst.dev;
 
        ttl = ip4_dst_hoplimit(&rt->dst);
        n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
@@ -693,7 +693,6 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
        *out_n = n;
        *saddr = fl4->saddr;
        *out_ttl = ttl;
-       *out_dev = rt->dst.dev;
 
        return 0;
 }
@@ -1088,10 +1087,14 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
 
        mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
 
+       preempt_disable();
+
        tcf_exts_to_list(f->exts, &actions);
        list_for_each_entry(a, &actions, list)
                tcf_action_stats_update(a, bytes, packets, lastuse);
 
+       preempt_enable();
+
        return 0;
 }
 
index f14d9c9ba77394b83aea50564afd3c762613467a..d0c8bf014453ea38736182c03ba7b2d9c5bcd4d7 100644 (file)
@@ -133,7 +133,7 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
 
        if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
            !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
                  vport, vlan, qos, set_flags);
@@ -353,7 +353,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
        root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
        if (!root_ns) {
                esw_warn(dev, "Failed to get FDB flow namespace\n");
-               return -ENOMEM;
+               return -EOPNOTSUPP;
        }
 
        flow_group_in = mlx5_vzalloc(inlen);
@@ -962,7 +962,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
        root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
        if (!root_ns) {
                esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
-               return -EIO;
+               return -EOPNOTSUPP;
        }
 
        flow_group_in = mlx5_vzalloc(inlen);
@@ -1079,7 +1079,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
        root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
        if (!root_ns) {
                esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
-               return -EIO;
+               return -EOPNOTSUPP;
        }
 
        flow_group_in = mlx5_vzalloc(inlen);
@@ -1630,7 +1630,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
        if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
            !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
                esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
        }
 
        if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
index 03293ed1cc22d2716ff5708dc2312b7291cc1899..595f7c7383b399440aedec593ae0fb0c37bb6748 100644 (file)
@@ -166,7 +166,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
        return 0;
 
 out_notsupp:
-       return -ENOTSUPP;
+       return -EOPNOTSUPP;
 }
 
 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
@@ -424,6 +424,7 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
        root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
        if (!root_ns) {
                esw_warn(dev, "Failed to get FDB flow namespace\n");
+               err = -EOPNOTSUPP;
                goto ns_err;
        }
 
@@ -535,7 +536,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
        ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
        if (!ns) {
                esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
-               return -ENOMEM;
+               return -EOPNOTSUPP;
        }
 
        ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
@@ -655,7 +656,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
                esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
                err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
                if (err1)
-                       esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err);
+                       esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
        }
        if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
                if (mlx5_eswitch_inline_mode_get(esw,
@@ -674,9 +675,14 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
        int vport;
        int err;
 
+       /* disable PF RoCE so missed packets don't go through RoCE steering */
+       mlx5_dev_list_lock();
+       mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+       mlx5_dev_list_unlock();
+
        err = esw_create_offloads_fdb_table(esw, nvports);
        if (err)
-               return err;
+               goto create_fdb_err;
 
        err = esw_create_offloads_table(esw);
        if (err)
@@ -696,11 +702,6 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
                        goto err_reps;
        }
 
-       /* disable PF RoCE so missed packets don't go through RoCE steering */
-       mlx5_dev_list_lock();
-       mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-       mlx5_dev_list_unlock();
-
        return 0;
 
 err_reps:
@@ -717,6 +718,13 @@ create_fg_err:
 
 create_ft_err:
        esw_destroy_offloads_fdb_table(esw);
+
+create_fdb_err:
+       /* enable back PF RoCE */
+       mlx5_dev_list_lock();
+       mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+       mlx5_dev_list_unlock();
+
        return err;
 }
 
@@ -724,11 +732,6 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
 {
        int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
 
-       /* enable back PF RoCE */
-       mlx5_dev_list_lock();
-       mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-       mlx5_dev_list_unlock();
-
        mlx5_eswitch_disable_sriov(esw);
        err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
        if (err) {
@@ -738,6 +741,11 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
                        esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
        }
 
+       /* enable back PF RoCE */
+       mlx5_dev_list_lock();
+       mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+       mlx5_dev_list_unlock();
+
        return err;
 }
 
index c4478ecd8056e42de2c359eb7a2abfd9e6400090..b53fc85a2375778ddd02ac07d21d88b56c49e432 100644 (file)
@@ -322,7 +322,7 @@ int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
                                                flow_table_properties_nic_receive.
                                                flow_modify_en);
        if (!atomic_mod_cap)
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
        opmod = 1;
 
        return  mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
index 0ac7a2fc916c438bc535b20d45964009747f0b33..6346a8f5883bcc911ef422cf572fd1891ddf73c9 100644 (file)
@@ -1822,7 +1822,7 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
        struct mlx5_flow_table *ft;
 
        ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
-       if (!ns)
+       if (WARN_ON(!ns))
                return -EINVAL;
        ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL, 0);
        if (IS_ERR(ft)) {
index d01e9f21d4691ea497aa7ea0666c83e330c078bb..3c315eb8d270f6f94ecaea2c8ee4d78ed1244658 100644 (file)
@@ -807,7 +807,7 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
                return 0;
        }
 
-       return -ENOTSUPP;
+       return -EOPNOTSUPP;
 }
 
 
index d2ec9d232a70727df71d0c733f60c78a55415392..fd12e0a377a567c693c7f174d7762dd6071ff925 100644 (file)
@@ -620,7 +620,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
        u32 out[MLX5_ST_SZ_DW(qtct_reg)];
 
        if (!MLX5_CAP_GEN(mdev, ets))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        return mlx5_core_access_reg(mdev, in, inlen, out, sizeof(out),
                                    MLX5_REG_QETCR, 0, 1);
@@ -632,7 +632,7 @@ static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
        u32 in[MLX5_ST_SZ_DW(qtct_reg)];
 
        if (!MLX5_CAP_GEN(mdev, ets))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        memset(in, 0, sizeof(in));
        return mlx5_core_access_reg(mdev, in, sizeof(in), out, outlen,
index 269e4401c342d1375e70a40ba9905dddf9b65cef..7129c30a2ab477d23be1b8b8d34e7190618e0f9f 100644 (file)
@@ -532,7 +532,7 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
        if (!MLX5_CAP_GEN(mdev, vport_group_manager))
                return -EACCES;
        if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        in = mlx5_vzalloc(inlen);
        if (!in)
index 01d0efa9c5c7419b6e2fa99ed6b99562811bcbd5..9e494a446b7ea7812a409b87f92bde19a0355286 100644 (file)
@@ -1172,7 +1172,8 @@ static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
 
 static int
 mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
-                                 struct mlxsw_sp_nexthop_group *nh_grp)
+                                 struct mlxsw_sp_nexthop_group *nh_grp,
+                                 bool reallocate)
 {
        u32 adj_index = nh_grp->adj_index; /* base */
        struct mlxsw_sp_nexthop *nh;
@@ -1187,7 +1188,7 @@ mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
                        continue;
                }
 
-               if (nh->update) {
+               if (nh->update || reallocate) {
                        err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
                                                          adj_index, nh);
                        if (err)
@@ -1248,7 +1249,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
                /* Nothing was added or removed, so no need to reallocate. Just
                 * update MAC on existing adjacency indexes.
                 */
-               err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
+               err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
+                                                       false);
                if (err) {
                        dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
                        goto set_trap;
@@ -1276,7 +1278,7 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
        nh_grp->adj_index_valid = 1;
        nh_grp->adj_index = adj_index;
        nh_grp->ecmp_size = ecmp_size;
-       err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
+       err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
        if (err) {
                dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
                goto set_trap;
index 8e5cb7605b0fa92d8d778e2c16cae49cdc2570db..873ce2cd76ba0540e501cf7455e8b562fb00c0e3 100644 (file)
@@ -297,7 +297,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
                list_del(&p_pkt->list_entry);
                b_last_packet = list_empty(&p_tx->active_descq);
                list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
-               if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+               if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
                        struct qed_ooo_buffer *p_buffer;
 
                        p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
@@ -309,7 +309,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
                        b_last_frag =
                                p_tx->cur_completing_bd_idx == p_pkt->bd_used;
                        tx_frag = p_pkt->bds_set[0].tx_frag;
-                       if (p_ll2_conn->gsi_enable)
+                       if (p_ll2_conn->conn.gsi_enable)
                                qed_ll2b_release_tx_gsi_packet(p_hwfn,
                                                               p_ll2_conn->
                                                               my_id,
@@ -378,7 +378,7 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
 
                spin_unlock_irqrestore(&p_tx->lock, flags);
                tx_frag = p_pkt->bds_set[0].tx_frag;
-               if (p_ll2_conn->gsi_enable)
+               if (p_ll2_conn->conn.gsi_enable)
                        qed_ll2b_complete_tx_gsi_packet(p_hwfn,
                                                        p_ll2_conn->my_id,
                                                        p_pkt->cookie,
@@ -550,7 +550,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
 
                list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
 
-               if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+               if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
                        struct qed_ooo_buffer *p_buffer;
 
                        p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
@@ -738,7 +738,7 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
                rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
                                               p_buffer->vlan, bd_flags,
                                               l4_hdr_offset_w,
-                                              p_ll2_conn->tx_dest, 0,
+                                              p_ll2_conn->conn.tx_dest, 0,
                                               first_frag,
                                               p_buffer->packet_length,
                                               p_buffer, true);
@@ -858,7 +858,7 @@ qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
        u16 buf_idx;
        int rc = 0;
 
-       if (p_ll2_info->conn_type != QED_LL2_TYPE_ISCSI_OOO)
+       if (p_ll2_info->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
                return rc;
 
        if (!rx_num_ooo_buffers)
@@ -901,7 +901,7 @@ static void
 qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
                                 struct qed_ll2_info *p_ll2_conn)
 {
-       if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO)
+       if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
                return;
 
        qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
@@ -913,7 +913,7 @@ static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
 {
        struct qed_ooo_buffer *p_buffer;
 
-       if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO)
+       if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
                return;
 
        qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
@@ -945,23 +945,19 @@ static int qed_ll2_start_ooo(struct qed_dev *cdev,
 {
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
-       struct qed_ll2_info *ll2_info;
+       struct qed_ll2_conn ll2_info;
        int rc;
 
-       ll2_info = kzalloc(sizeof(*ll2_info), GFP_KERNEL);
-       if (!ll2_info)
-               return -ENOMEM;
-       ll2_info->conn_type = QED_LL2_TYPE_ISCSI_OOO;
-       ll2_info->mtu = params->mtu;
-       ll2_info->rx_drop_ttl0_flg = params->drop_ttl0_packets;
-       ll2_info->rx_vlan_removal_en = params->rx_vlan_stripping;
-       ll2_info->tx_tc = OOO_LB_TC;
-       ll2_info->tx_dest = CORE_TX_DEST_LB;
-
-       rc = qed_ll2_acquire_connection(hwfn, ll2_info,
+       ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
+       ll2_info.mtu = params->mtu;
+       ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
+       ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
+       ll2_info.tx_tc = OOO_LB_TC;
+       ll2_info.tx_dest = CORE_TX_DEST_LB;
+
+       rc = qed_ll2_acquire_connection(hwfn, &ll2_info,
                                        QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
                                        handle);
-       kfree(ll2_info);
        if (rc) {
                DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
                goto out;
@@ -1006,7 +1002,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
                                     struct qed_ll2_info *p_ll2_conn,
                                     u8 action_on_error)
 {
-       enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
+       enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
        struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
        struct core_rx_start_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
@@ -1032,7 +1028,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
        p_ramrod->sb_index = p_rx->rx_sb_index;
        p_ramrod->complete_event_flg = 1;
 
-       p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
+       p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
        DMA_REGPAIR_LE(p_ramrod->bd_base,
                       p_rx->rxq_chain.p_phys_addr);
        cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
@@ -1040,8 +1036,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
        DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
                       qed_chain_get_pbl_phys(&p_rx->rcq_chain));
 
-       p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg;
-       p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en;
+       p_ramrod->drop_ttl0_flg = p_ll2_conn->conn.rx_drop_ttl0_flg;
+       p_ramrod->inner_vlan_removal_en = p_ll2_conn->conn.rx_vlan_removal_en;
        p_ramrod->queue_id = p_ll2_conn->queue_id;
        p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
                                                                          : 1;
@@ -1056,14 +1052,14 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
        }
 
        p_ramrod->action_on_error.error_type = action_on_error;
-       p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
+       p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
        return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
 static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
                                     struct qed_ll2_info *p_ll2_conn)
 {
-       enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
+       enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
        struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
        struct core_tx_start_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
@@ -1075,7 +1071,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
        if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
                return 0;
 
-       if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO)
+       if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
                p_ll2_conn->tx_stats_en = 0;
        else
                p_ll2_conn->tx_stats_en = 1;
@@ -1096,7 +1092,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
 
        p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
        p_ramrod->sb_index = p_tx->tx_sb_index;
-       p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
+       p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
        p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
        p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
 
@@ -1106,7 +1102,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
        p_ramrod->pbl_size = cpu_to_le16(pbl_size);
 
        memset(&pq_params, 0, sizeof(pq_params));
-       pq_params.core.tc = p_ll2_conn->tx_tc;
+       pq_params.core.tc = p_ll2_conn->conn.tx_tc;
        pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
        p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
 
@@ -1123,7 +1119,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
                DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
        }
 
-       p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
+       p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
        return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
@@ -1224,7 +1220,7 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
 
        DP_VERBOSE(p_hwfn, QED_MSG_LL2,
                   "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
-                  p_ll2_info->conn_type, rx_num_desc);
+                  p_ll2_info->conn.conn_type, rx_num_desc);
 
 out:
        return rc;
@@ -1262,7 +1258,7 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
 
        DP_VERBOSE(p_hwfn, QED_MSG_LL2,
                   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
-                  p_ll2_info->conn_type, tx_num_desc);
+                  p_ll2_info->conn.conn_type, tx_num_desc);
 
 out:
        if (rc)
@@ -1273,7 +1269,7 @@ out:
 }
 
 int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
-                              struct qed_ll2_info *p_params,
+                              struct qed_ll2_conn *p_params,
                               u16 rx_num_desc,
                               u16 tx_num_desc,
                               u8 *p_connection_handle)
@@ -1302,15 +1298,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
        if (!p_ll2_info)
                return -EBUSY;
 
-       p_ll2_info->conn_type = p_params->conn_type;
-       p_ll2_info->mtu = p_params->mtu;
-       p_ll2_info->rx_drop_ttl0_flg = p_params->rx_drop_ttl0_flg;
-       p_ll2_info->rx_vlan_removal_en = p_params->rx_vlan_removal_en;
-       p_ll2_info->tx_tc = p_params->tx_tc;
-       p_ll2_info->tx_dest = p_params->tx_dest;
-       p_ll2_info->ai_err_packet_too_big = p_params->ai_err_packet_too_big;
-       p_ll2_info->ai_err_no_buf = p_params->ai_err_no_buf;
-       p_ll2_info->gsi_enable = p_params->gsi_enable;
+       p_ll2_info->conn = *p_params;
 
        rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
        if (rc)
@@ -1371,9 +1359,9 @@ static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
 
        SET_FIELD(action_on_error,
                  CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
-                 p_ll2_conn->ai_err_packet_too_big);
+                 p_ll2_conn->conn.ai_err_packet_too_big);
        SET_FIELD(action_on_error,
-                 CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->ai_err_no_buf);
+                 CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->conn.ai_err_no_buf);
 
        return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
 }
@@ -1600,7 +1588,7 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
                   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
                   p_ll2->queue_id,
                   p_ll2->cid,
-                  p_ll2->conn_type,
+                  p_ll2->conn.conn_type,
                   prod_idx,
                   first_frag_len,
                   num_of_bds,
@@ -1676,7 +1664,7 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
                   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
                   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
                   p_ll2_conn->queue_id,
-                  p_ll2_conn->cid, p_ll2_conn->conn_type, db_msg.spq_prod);
+                  p_ll2_conn->cid, p_ll2_conn->conn.conn_type, db_msg.spq_prod);
 }
 
 int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
@@ -1817,7 +1805,7 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
                qed_ll2_rxq_flush(p_hwfn, connection_handle);
        }
 
-       if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO)
+       if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
                qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
 
        return rc;
@@ -1993,7 +1981,7 @@ static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
 
 static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
 {
-       struct qed_ll2_info ll2_info;
+       struct qed_ll2_conn ll2_info;
        struct qed_ll2_buffer *buffer, *tmp_buffer;
        enum qed_ll2_conn_type conn_type;
        struct qed_ptt *p_ptt;
@@ -2041,6 +2029,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
 
        /* Prepare the temporary ll2 information */
        memset(&ll2_info, 0, sizeof(ll2_info));
+
        ll2_info.conn_type = conn_type;
        ll2_info.mtu = params->mtu;
        ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
@@ -2120,7 +2109,6 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
        }
 
        ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
-
        return 0;
 
 release_terminate_all:
index 6625a3ae5a335bcace6f99b0e3914e5472bdc06d..31417928b6354f66d7f55c28d9e52565f9f3773d 100644 (file)
@@ -112,15 +112,8 @@ struct qed_ll2_tx_queue {
        bool b_completing_packet;
 };
 
-struct qed_ll2_info {
-       /* Lock protecting the state of LL2 */
-       struct mutex mutex;
+struct qed_ll2_conn {
        enum qed_ll2_conn_type conn_type;
-       u32 cid;
-       u8 my_id;
-       u8 queue_id;
-       u8 tx_stats_id;
-       bool b_active;
        u16 mtu;
        u8 rx_drop_ttl0_flg;
        u8 rx_vlan_removal_en;
@@ -128,10 +121,21 @@ struct qed_ll2_info {
        enum core_tx_dest tx_dest;
        enum core_error_handle ai_err_packet_too_big;
        enum core_error_handle ai_err_no_buf;
+       u8 gsi_enable;
+};
+
+struct qed_ll2_info {
+       /* Lock protecting the state of LL2 */
+       struct mutex mutex;
+       struct qed_ll2_conn conn;
+       u32 cid;
+       u8 my_id;
+       u8 queue_id;
+       u8 tx_stats_id;
+       bool b_active;
        u8 tx_stats_en;
        struct qed_ll2_rx_queue rx_queue;
        struct qed_ll2_tx_queue tx_queue;
-       u8 gsi_enable;
 };
 
 /**
@@ -149,7 +153,7 @@ struct qed_ll2_info {
  * @return 0 on success, failure otherwise
  */
 int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
-                              struct qed_ll2_info *p_params,
+                              struct qed_ll2_conn *p_params,
                               u16 rx_num_desc,
                               u16 tx_num_desc,
                               u8 *p_connection_handle);
index 2a16547c89661f1a35b8a575283cc39a5eaf7f83..2dbdb329899187d021b1f74b63a9948835ec523d 100644 (file)
@@ -2632,7 +2632,7 @@ static int qed_roce_ll2_start(struct qed_dev *cdev,
 {
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        struct qed_roce_ll2_info *roce_ll2;
-       struct qed_ll2_info ll2_params;
+       struct qed_ll2_conn ll2_params;
        int rc;
 
        if (!params) {
index 89ac1e3f617599238d35fa444ff66c20abd48dc6..301f48755093bb084ce7741b24091b4c14eea67b 100644 (file)
@@ -179,6 +179,49 @@ static struct mdiobb_ops bb_ops = {
        .get_mdio_data = ravb_get_mdio_data,
 };
 
+/* Free TX skb function for AVB-IP */
+static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
+{
+       struct ravb_private *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &priv->stats[q];
+       struct ravb_tx_desc *desc;
+       int free_num = 0;
+       int entry;
+       u32 size;
+
+       for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
+               bool txed;
+
+               entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
+                                            NUM_TX_DESC);
+               desc = &priv->tx_ring[q][entry];
+               txed = desc->die_dt == DT_FEMPTY;
+               if (free_txed_only && !txed)
+                       break;
+               /* Descriptor type must be checked before all other reads */
+               dma_rmb();
+               size = le16_to_cpu(desc->ds_tagl) & TX_DS;
+               /* Free the original skb. */
+               if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
+                       dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
+                                        size, DMA_TO_DEVICE);
+                       /* Last packet descriptor? */
+                       if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
+                               entry /= NUM_TX_DESC;
+                               dev_kfree_skb_any(priv->tx_skb[q][entry]);
+                               priv->tx_skb[q][entry] = NULL;
+                               if (txed)
+                                       stats->tx_packets++;
+                       }
+                       free_num++;
+               }
+               if (txed)
+                       stats->tx_bytes += size;
+               desc->die_dt = DT_EEMPTY;
+       }
+       return free_num;
+}
+
 /* Free skb's and DMA buffers for Ethernet AVB */
 static void ravb_ring_free(struct net_device *ndev, int q)
 {
@@ -194,19 +237,21 @@ static void ravb_ring_free(struct net_device *ndev, int q)
        kfree(priv->rx_skb[q]);
        priv->rx_skb[q] = NULL;
 
-       /* Free TX skb ringbuffer */
-       if (priv->tx_skb[q]) {
-               for (i = 0; i < priv->num_tx_ring[q]; i++)
-                       dev_kfree_skb(priv->tx_skb[q][i]);
-       }
-       kfree(priv->tx_skb[q]);
-       priv->tx_skb[q] = NULL;
-
        /* Free aligned TX buffers */
        kfree(priv->tx_align[q]);
        priv->tx_align[q] = NULL;
 
        if (priv->rx_ring[q]) {
+               for (i = 0; i < priv->num_rx_ring[q]; i++) {
+                       struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
+
+                       if (!dma_mapping_error(ndev->dev.parent,
+                                              le32_to_cpu(desc->dptr)))
+                               dma_unmap_single(ndev->dev.parent,
+                                                le32_to_cpu(desc->dptr),
+                                                PKT_BUF_SZ,
+                                                DMA_FROM_DEVICE);
+               }
                ring_size = sizeof(struct ravb_ex_rx_desc) *
                            (priv->num_rx_ring[q] + 1);
                dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
@@ -215,12 +260,20 @@ static void ravb_ring_free(struct net_device *ndev, int q)
        }
 
        if (priv->tx_ring[q]) {
+               ravb_tx_free(ndev, q, false);
+
                ring_size = sizeof(struct ravb_tx_desc) *
                            (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
                dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
                                  priv->tx_desc_dma[q]);
                priv->tx_ring[q] = NULL;
        }
+
+       /* Free TX skb ringbuffer.
+        * SKBs are freed by ravb_tx_free() call above.
+        */
+       kfree(priv->tx_skb[q]);
+       priv->tx_skb[q] = NULL;
 }
 
 /* Format skb and descriptor buffer for Ethernet AVB */
@@ -431,44 +484,6 @@ static int ravb_dmac_init(struct net_device *ndev)
        return 0;
 }
 
-/* Free TX skb function for AVB-IP */
-static int ravb_tx_free(struct net_device *ndev, int q)
-{
-       struct ravb_private *priv = netdev_priv(ndev);
-       struct net_device_stats *stats = &priv->stats[q];
-       struct ravb_tx_desc *desc;
-       int free_num = 0;
-       int entry;
-       u32 size;
-
-       for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
-               entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
-                                            NUM_TX_DESC);
-               desc = &priv->tx_ring[q][entry];
-               if (desc->die_dt != DT_FEMPTY)
-                       break;
-               /* Descriptor type must be checked before all other reads */
-               dma_rmb();
-               size = le16_to_cpu(desc->ds_tagl) & TX_DS;
-               /* Free the original skb. */
-               if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
-                       dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
-                                        size, DMA_TO_DEVICE);
-                       /* Last packet descriptor? */
-                       if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
-                               entry /= NUM_TX_DESC;
-                               dev_kfree_skb_any(priv->tx_skb[q][entry]);
-                               priv->tx_skb[q][entry] = NULL;
-                               stats->tx_packets++;
-                       }
-                       free_num++;
-               }
-               stats->tx_bytes += size;
-               desc->die_dt = DT_EEMPTY;
-       }
-       return free_num;
-}
-
 static void ravb_get_tx_tstamp(struct net_device *ndev)
 {
        struct ravb_private *priv = netdev_priv(ndev);
@@ -902,7 +917,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
                        spin_lock_irqsave(&priv->lock, flags);
                        /* Clear TX interrupt */
                        ravb_write(ndev, ~mask, TIS);
-                       ravb_tx_free(ndev, q);
+                       ravb_tx_free(ndev, q, true);
                        netif_wake_subqueue(ndev, q);
                        mmiowb();
                        spin_unlock_irqrestore(&priv->lock, flags);
@@ -1567,7 +1582,8 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
        priv->cur_tx[q] += NUM_TX_DESC;
        if (priv->cur_tx[q] - priv->dirty_tx[q] >
-           (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
+           (priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
+           !ravb_tx_free(ndev, q, true))
                netif_stop_subqueue(ndev, q);
 
 exit:
index be3c91c7f211d94ad7386b77de73676933a46dcd..5484fd726d5af7f5f10708c57d062b9992be655d 100644 (file)
@@ -305,8 +305,12 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
 {
        void __iomem *ioaddr = hw->pcsr;
        u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
+       u32 intr_mask = readl(ioaddr + GMAC_INT_MASK);
        int ret = 0;
 
+       /* Discard masked bits */
+       intr_status &= ~intr_mask;
+
        /* Not used events (e.g. MMC interrupts) are not handled. */
        if ((intr_status & GMAC_INT_STATUS_MMCTIS))
                x->mmc_tx_irq_n++;
index 082cd48db6a769b43507be7442ca6d9ff0099c5f..36942f5a6a53adfc20bd254b0045fad51e398c12 100644 (file)
@@ -351,6 +351,7 @@ void stmmac_remove_config_dt(struct platform_device *pdev,
        if (of_phy_is_fixed_link(np))
                of_phy_deregister_fixed_link(np);
        of_node_put(plat->phy_node);
+       of_node_put(plat->mdio_node);
 }
 #else
 struct plat_stmmacenet_data *
index b203143647e6c0c39ae17b722366e52fa8f3ade8..65088224c207d9b6155192281b551cc823045b48 100644 (file)
@@ -3160,7 +3160,7 @@ static int cpsw_resume(struct device *dev)
 {
        struct platform_device  *pdev = to_platform_device(dev);
        struct net_device       *ndev = platform_get_drvdata(pdev);
-       struct cpsw_common      *cpsw = netdev_priv(ndev);
+       struct cpsw_common      *cpsw = ndev_to_cpsw(ndev);
 
        /* Select default pin state */
        pinctrl_pm_select_default_state(dev);
index 93dc10b10c0901c8974c4990eb87a22d6411e690..aa02a03a6d8db22996941cd53d4d12968a4e0ee2 100644 (file)
 /* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
 #define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT)
 
+#ifdef __BIG_ENDIAN
+#define xemaclite_readl                ioread32be
+#define xemaclite_writel       iowrite32be
+#else
+#define xemaclite_readl                ioread32
+#define xemaclite_writel       iowrite32
+#endif
+
 /**
  * struct net_local - Our private per device data
  * @ndev:              instance of the network device
@@ -156,15 +164,15 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata)
        u32 reg_data;
 
        /* Enable the Tx interrupts for the first Buffer */
-       reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
-       __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
-                    drvdata->base_addr + XEL_TSR_OFFSET);
+       reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET);
+       xemaclite_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
+                        drvdata->base_addr + XEL_TSR_OFFSET);
 
        /* Enable the Rx interrupts for the first buffer */
-       __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
+       xemaclite_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
 
        /* Enable the Global Interrupt Enable */
-       __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
+       xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
 }
 
 /**
@@ -179,17 +187,17 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata)
        u32 reg_data;
 
        /* Disable the Global Interrupt Enable */
-       __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
+       xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
 
        /* Disable the Tx interrupts for the first buffer */
-       reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
-       __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
-                    drvdata->base_addr + XEL_TSR_OFFSET);
+       reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET);
+       xemaclite_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
+                        drvdata->base_addr + XEL_TSR_OFFSET);
 
        /* Disable the Rx interrupts for the first buffer */
-       reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET);
-       __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
-                    drvdata->base_addr + XEL_RSR_OFFSET);
+       reg_data = xemaclite_readl(drvdata->base_addr + XEL_RSR_OFFSET);
+       xemaclite_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
+                        drvdata->base_addr + XEL_RSR_OFFSET);
 }
 
 /**
@@ -321,7 +329,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
                byte_count = ETH_FRAME_LEN;
 
        /* Check if the expected buffer is available */
-       reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+       reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
        if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
             XEL_TSR_XMIT_ACTIVE_MASK)) == 0) {
 
@@ -334,7 +342,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
 
                addr = (void __iomem __force *)((u32 __force)addr ^
                                                 XEL_BUFFER_OFFSET);
-               reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+               reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
 
                if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
                     XEL_TSR_XMIT_ACTIVE_MASK)) != 0)
@@ -345,16 +353,16 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
        /* Write the frame to the buffer */
        xemaclite_aligned_write(data, (u32 __force *) addr, byte_count);
 
-       __raw_writel((byte_count & XEL_TPLR_LENGTH_MASK),
-                    addr + XEL_TPLR_OFFSET);
+       xemaclite_writel((byte_count & XEL_TPLR_LENGTH_MASK),
+                        addr + XEL_TPLR_OFFSET);
 
        /* Update the Tx Status Register to indicate that there is a
         * frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which
         * is used by the interrupt handler to check whether a frame
         * has been transmitted */
-       reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+       reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
        reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK);
-       __raw_writel(reg_data, addr + XEL_TSR_OFFSET);
+       xemaclite_writel(reg_data, addr + XEL_TSR_OFFSET);
 
        return 0;
 }
@@ -369,7 +377,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
  *
  * Return:     Total number of bytes received
  */
-static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
+static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
 {
        void __iomem *addr;
        u16 length, proto_type;
@@ -379,7 +387,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
        addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use);
 
        /* Verify which buffer has valid data */
-       reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
+       reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
 
        if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) {
                if (drvdata->rx_ping_pong != 0)
@@ -396,27 +404,28 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
                        return 0;       /* No data was available */
 
                /* Verify that buffer has valid data */
-               reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
+               reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
                if ((reg_data & XEL_RSR_RECV_DONE_MASK) !=
                     XEL_RSR_RECV_DONE_MASK)
                        return 0;       /* No data was available */
        }
 
        /* Get the protocol type of the ethernet frame that arrived */
-       proto_type = ((ntohl(__raw_readl(addr + XEL_HEADER_OFFSET +
+       proto_type = ((ntohl(xemaclite_readl(addr + XEL_HEADER_OFFSET +
                        XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) &
                        XEL_RPLR_LENGTH_MASK);
 
        /* Check if received ethernet frame is a raw ethernet frame
         * or an IP packet or an ARP packet */
-       if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
+       if (proto_type > ETH_DATA_LEN) {
 
                if (proto_type == ETH_P_IP) {
-                       length = ((ntohl(__raw_readl(addr +
+                       length = ((ntohl(xemaclite_readl(addr +
                                        XEL_HEADER_IP_LENGTH_OFFSET +
                                        XEL_RXBUFF_OFFSET)) >>
                                        XEL_HEADER_SHIFT) &
                                        XEL_RPLR_LENGTH_MASK);
+                       length = min_t(u16, length, ETH_DATA_LEN);
                        length += ETH_HLEN + ETH_FCS_LEN;
 
                } else if (proto_type == ETH_P_ARP)
@@ -429,14 +438,17 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
                /* Use the length in the frame, plus the header and trailer */
                length = proto_type + ETH_HLEN + ETH_FCS_LEN;
 
+       if (WARN_ON(length > maxlen))
+               length = maxlen;
+
        /* Read from the EmacLite device */
        xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET),
                                data, length);
 
        /* Acknowledge the frame */
-       reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
+       reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
        reg_data &= ~XEL_RSR_RECV_DONE_MASK;
-       __raw_writel(reg_data, addr + XEL_RSR_OFFSET);
+       xemaclite_writel(reg_data, addr + XEL_RSR_OFFSET);
 
        return length;
 }
@@ -463,14 +475,14 @@ static void xemaclite_update_address(struct net_local *drvdata,
 
        xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN);
 
-       __raw_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
+       xemaclite_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
 
        /* Update the MAC address in the EmacLite */
-       reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
-       __raw_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
+       reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
+       xemaclite_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
 
        /* Wait for EmacLite to finish with the MAC address update */
-       while ((__raw_readl(addr + XEL_TSR_OFFSET) &
+       while ((xemaclite_readl(addr + XEL_TSR_OFFSET) &
                XEL_TSR_PROG_MAC_ADDR) != 0)
                ;
 }
@@ -603,7 +615,7 @@ static void xemaclite_rx_handler(struct net_device *dev)
 
        skb_reserve(skb, 2);
 
-       len = xemaclite_recv_data(lp, (u8 *) skb->data);
+       len = xemaclite_recv_data(lp, (u8 *) skb->data, len);
 
        if (!len) {
                dev->stats.rx_errors++;
@@ -640,32 +652,32 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
        u32 tx_status;
 
        /* Check if there is Rx Data available */
-       if ((__raw_readl(base_addr + XEL_RSR_OFFSET) &
+       if ((xemaclite_readl(base_addr + XEL_RSR_OFFSET) &
                         XEL_RSR_RECV_DONE_MASK) ||
-           (__raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
+           (xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
                         & XEL_RSR_RECV_DONE_MASK))
 
                xemaclite_rx_handler(dev);
 
        /* Check if the Transmission for the first buffer is completed */
-       tx_status = __raw_readl(base_addr + XEL_TSR_OFFSET);
+       tx_status = xemaclite_readl(base_addr + XEL_TSR_OFFSET);
        if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
                (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
 
                tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
-               __raw_writel(tx_status, base_addr + XEL_TSR_OFFSET);
+               xemaclite_writel(tx_status, base_addr + XEL_TSR_OFFSET);
 
                tx_complete = true;
        }
 
        /* Check if the Transmission for the second buffer is completed */
-       tx_status = __raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
+       tx_status = xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
        if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
                (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
 
                tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
-               __raw_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
-                            XEL_TSR_OFFSET);
+               xemaclite_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
+                                XEL_TSR_OFFSET);
 
                tx_complete = true;
        }
@@ -698,7 +710,7 @@ static int xemaclite_mdio_wait(struct net_local *lp)
        /* wait for the MDIO interface to not be busy or timeout
           after some time.
        */
-       while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
+       while (xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
                        XEL_MDIOCTRL_MDIOSTS_MASK) {
                if (time_before_eq(end, jiffies)) {
                        WARN_ON(1);
@@ -734,17 +746,17 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
         * MDIO Address register. Set the Status bit in the MDIO Control
         * register to start a MDIO read transaction.
         */
-       ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
-       __raw_writel(XEL_MDIOADDR_OP_MASK |
-                    ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
-                    lp->base_addr + XEL_MDIOADDR_OFFSET);
-       __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
-                    lp->base_addr + XEL_MDIOCTRL_OFFSET);
+       ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+       xemaclite_writel(XEL_MDIOADDR_OP_MASK |
+                        ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
+                        lp->base_addr + XEL_MDIOADDR_OFFSET);
+       xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
+                        lp->base_addr + XEL_MDIOCTRL_OFFSET);
 
        if (xemaclite_mdio_wait(lp))
                return -ETIMEDOUT;
 
-       rc = __raw_readl(lp->base_addr + XEL_MDIORD_OFFSET);
+       rc = xemaclite_readl(lp->base_addr + XEL_MDIORD_OFFSET);
 
        dev_dbg(&lp->ndev->dev,
                "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
@@ -781,13 +793,13 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
         * Data register. Finally, set the Status bit in the MDIO Control
         * register to start a MDIO write transaction.
         */
-       ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
-       __raw_writel(~XEL_MDIOADDR_OP_MASK &
-                    ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
-                    lp->base_addr + XEL_MDIOADDR_OFFSET);
-       __raw_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
-       __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
-                    lp->base_addr + XEL_MDIOCTRL_OFFSET);
+       ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+       xemaclite_writel(~XEL_MDIOADDR_OP_MASK &
+                        ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
+                        lp->base_addr + XEL_MDIOADDR_OFFSET);
+       xemaclite_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
+       xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
+                        lp->base_addr + XEL_MDIOCTRL_OFFSET);
 
        return 0;
 }
@@ -834,8 +846,8 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
        /* Enable the MDIO bus by asserting the enable bit in MDIO Control
         * register.
         */
-       __raw_writel(XEL_MDIOCTRL_MDIOEN_MASK,
-                    lp->base_addr + XEL_MDIOCTRL_OFFSET);
+       xemaclite_writel(XEL_MDIOCTRL_MDIOEN_MASK,
+                        lp->base_addr + XEL_MDIOCTRL_OFFSET);
 
        bus = mdiobus_alloc();
        if (!bus) {
@@ -1140,8 +1152,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
        }
 
        /* Clear the Tx CSR's in case this is a restart */
-       __raw_writel(0, lp->base_addr + XEL_TSR_OFFSET);
-       __raw_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
+       xemaclite_writel(0, lp->base_addr + XEL_TSR_OFFSET);
+       xemaclite_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
 
        /* Set the MAC address in the EmacLite device */
        xemaclite_update_address(lp, ndev->dev_addr);
index 8b6810bad54b73fc90da579521e1e20050933aac..99d3df788ce81e6f423c7458466812607df68ed1 100644 (file)
@@ -69,7 +69,6 @@ struct gtp_dev {
        struct socket           *sock0;
        struct socket           *sock1u;
 
-       struct net              *net;
        struct net_device       *dev;
 
        unsigned int            hash_size;
@@ -316,7 +315,7 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
 
        netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
 
-       xnet = !net_eq(gtp->net, dev_net(gtp->dev));
+       xnet = !net_eq(sock_net(sk), dev_net(gtp->dev));
 
        switch (udp_sk(sk)->encap_type) {
        case UDP_ENCAP_GTP0:
@@ -612,7 +611,7 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
                                    pktinfo.fl4.saddr, pktinfo.fl4.daddr,
                                    pktinfo.iph->tos,
                                    ip4_dst_hoplimit(&pktinfo.rt->dst),
-                                   htons(IP_DF),
+                                   0,
                                    pktinfo.gtph_port, pktinfo.gtph_port,
                                    true, false);
                break;
@@ -658,7 +657,7 @@ static void gtp_link_setup(struct net_device *dev)
 static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
 static void gtp_hashtable_free(struct gtp_dev *gtp);
 static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
-                           int fd_gtp0, int fd_gtp1, struct net *src_net);
+                           int fd_gtp0, int fd_gtp1);
 
 static int gtp_newlink(struct net *src_net, struct net_device *dev,
                        struct nlattr *tb[], struct nlattr *data[])
@@ -675,7 +674,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
        fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
        fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
 
-       err = gtp_encap_enable(dev, gtp, fd0, fd1, src_net);
+       err = gtp_encap_enable(dev, gtp, fd0, fd1);
        if (err < 0)
                goto out_err;
 
@@ -821,7 +820,7 @@ static void gtp_hashtable_free(struct gtp_dev *gtp)
 }
 
 static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
-                           int fd_gtp0, int fd_gtp1, struct net *src_net)
+                           int fd_gtp0, int fd_gtp1)
 {
        struct udp_tunnel_sock_cfg tuncfg = {NULL};
        struct socket *sock0, *sock1u;
@@ -858,7 +857,6 @@ static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
 
        gtp->sock0 = sock0;
        gtp->sock1u = sock1u;
-       gtp->net = src_net;
 
        tuncfg.sk_user_data = gtp;
        tuncfg.encap_rcv = gtp_encap_recv;
@@ -1376,3 +1374,4 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>");
 MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
 MODULE_ALIAS_RTNL_LINK("gtp");
+MODULE_ALIAS_GENL_FAMILY("gtp");
index ece59c54a653348014b686882e28e5bf1b335e37..4a40a3d825b41c2758a086a40cb216d6165f1cfa 100644 (file)
@@ -648,8 +648,8 @@ static void ax_setup(struct net_device *dev)
 {
        /* Finish setting up the DEVICE info. */
        dev->mtu             = AX_MTU;
-       dev->hard_header_len = 0;
-       dev->addr_len        = 0;
+       dev->hard_header_len = AX25_MAX_HEADER_LEN;
+       dev->addr_len        = AX25_ADDR_LEN;
        dev->type            = ARPHRD_AX25;
        dev->tx_queue_len    = 10;
        dev->header_ops      = &ax25_header_ops;
index 5a1cc089acb7fd2e79c18876cd7951f6dfb6e747..86e5749226ef4cf65d6070bca1ab0d4be35bf2e0 100644 (file)
@@ -1295,6 +1295,9 @@ void netvsc_channel_cb(void *context)
        ndev = hv_get_drvdata(device);
        buffer = get_per_channel_state(channel);
 
+       /* commit_rd_index() -> hv_signal_on_read() needs this. */
+       init_cached_read_index(channel);
+
        do {
                desc = get_next_pkt_raw(channel);
                if (desc != NULL) {
@@ -1347,6 +1350,9 @@ void netvsc_channel_cb(void *context)
 
                        bufferlen = bytes_recvd;
                }
+
+               init_cached_read_index(channel);
+
        } while (1);
 
        if (bufferlen > NETVSC_PACKET_SIZE)
index 1e05b7c2d157a22e06d115bcdd1d268876be03a7..0844f849641346b092e83923c1d196709b355e4c 100644 (file)
@@ -164,6 +164,7 @@ static void loopback_setup(struct net_device *dev)
 {
        dev->mtu                = 64 * 1024;
        dev->hard_header_len    = ETH_HLEN;     /* 14   */
+       dev->min_header_len     = ETH_HLEN;     /* 14   */
        dev->addr_len           = ETH_ALEN;     /* 6    */
        dev->type               = ARPHRD_LOOPBACK;      /* 0x0001*/
        dev->flags              = IFF_LOOPBACK;
index 5c26653eceb5660c0cd12b1a6669a58525fe7207..c27011bbe30c52d2eb892ab0d86f8cf3d6f4deb9 100644 (file)
@@ -681,7 +681,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
        size_t linear;
 
        if (q->flags & IFF_VNET_HDR) {
-               vnet_hdr_len = q->vnet_hdr_sz;
+               vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
 
                err = -EINVAL;
                if (len < vnet_hdr_len)
@@ -820,12 +820,12 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
 
        if (q->flags & IFF_VNET_HDR) {
                struct virtio_net_hdr vnet_hdr;
-               vnet_hdr_len = q->vnet_hdr_sz;
+               vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
                if (iov_iter_count(iter) < vnet_hdr_len)
                        return -EINVAL;
 
                if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
-                                           macvtap_is_little_endian(q)))
+                                           macvtap_is_little_endian(q), true))
                        BUG();
 
                if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
index e741bf614c4eeca4f4c041bff53880a5ca8c7439..b0492ef2cdaa0d360928e3e65a14808e336c40ec 100644 (file)
@@ -21,6 +21,23 @@ MODULE_DESCRIPTION("Broadcom 63xx internal PHY driver");
 MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
 MODULE_LICENSE("GPL");
 
+static int bcm63xx_config_intr(struct phy_device *phydev)
+{
+       int reg, err;
+
+       reg = phy_read(phydev, MII_BCM63XX_IR);
+       if (reg < 0)
+               return reg;
+
+       if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+               reg &= ~MII_BCM63XX_IR_GMASK;
+       else
+               reg |= MII_BCM63XX_IR_GMASK;
+
+       err = phy_write(phydev, MII_BCM63XX_IR, reg);
+       return err;
+}
+
 static int bcm63xx_config_init(struct phy_device *phydev)
 {
        int reg, err;
@@ -55,7 +72,7 @@ static struct phy_driver bcm63xx_driver[] = {
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
        .ack_interrupt  = bcm_phy_ack_intr,
-       .config_intr    = bcm_phy_config_intr,
+       .config_intr    = bcm63xx_config_intr,
 }, {
        /* same phy as above, with just a different OUI */
        .phy_id         = 0x002bdc00,
@@ -67,7 +84,7 @@ static struct phy_driver bcm63xx_driver[] = {
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
        .ack_interrupt  = bcm_phy_ack_intr,
-       .config_intr    = bcm_phy_config_intr,
+       .config_intr    = bcm63xx_config_intr,
 } };
 
 module_phy_driver(bcm63xx_driver);
index 800b39f0627943343c4276de637b30be4692352f..a10d0e7fc5f7010537560552cd822e59fd2d8469 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/phy.h>
 
 #define TI_DP83848C_PHY_ID             0x20005ca0
+#define TI_DP83620_PHY_ID              0x20005ce0
 #define NS_DP83848C_PHY_ID             0x20005c90
 #define TLK10X_PHY_ID                  0x2000a210
 #define TI_DP83822_PHY_ID              0x2000a240
@@ -77,6 +78,7 @@ static int dp83848_config_intr(struct phy_device *phydev)
 static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
        { TI_DP83848C_PHY_ID, 0xfffffff0 },
        { NS_DP83848C_PHY_ID, 0xfffffff0 },
+       { TI_DP83620_PHY_ID, 0xfffffff0 },
        { TLK10X_PHY_ID, 0xfffffff0 },
        { TI_DP83822_PHY_ID, 0xfffffff0 },
        { }
@@ -106,6 +108,7 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
 static struct phy_driver dp83848_driver[] = {
        DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"),
        DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"),
+       DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY"),
        DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"),
        DP83848_PHY_DRIVER(TI_DP83822_PHY_ID, "TI DP83822 10/100 Mbps PHY"),
 };
index 0b78210c0fa74e88b2ef5e27d4a7e6c78e9fa45c..ed0d235cf850ea11564bb446f8c8fc53879a9e73 100644 (file)
@@ -1679,6 +1679,8 @@ static struct phy_driver marvell_drivers[] = {
                .ack_interrupt = &marvell_ack_interrupt,
                .config_intr = &marvell_config_intr,
                .did_interrupt = &m88e1121_did_interrupt,
+               .get_wol = &m88e1318_get_wol,
+               .set_wol = &m88e1318_set_wol,
                .resume = &marvell_resume,
                .suspend = &marvell_suspend,
                .get_sset_count = marvell_get_sset_count,
index c0b4e65267af8b541974bd3a246897e1c38a9ac9..46fe1ae919a30a9a9b7644b5862f4a5bfa0b56ef 100644 (file)
@@ -81,8 +81,6 @@ static int iproc_mdio_read(struct mii_bus *bus, int phy_id, int reg)
        if (rc)
                return rc;
 
-       iproc_mdio_config_clk(priv->base);
-
        /* Prepare the read operation */
        cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) |
                (reg << MII_DATA_RA_SHIFT) |
@@ -112,8 +110,6 @@ static int iproc_mdio_write(struct mii_bus *bus, int phy_id,
        if (rc)
                return rc;
 
-       iproc_mdio_config_clk(priv->base);
-
        /* Prepare the write operation */
        cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) |
                (reg << MII_DATA_RA_SHIFT) |
@@ -163,6 +159,8 @@ static int iproc_mdio_probe(struct platform_device *pdev)
        bus->read = iproc_mdio_read;
        bus->write = iproc_mdio_write;
 
+       iproc_mdio_config_clk(priv->base);
+
        rc = of_mdiobus_register(bus, pdev->dev.of_node);
        if (rc) {
                dev_err(&pdev->dev, "MDIO bus registration failed\n");
index 9a77289109b721ed5190f214bb4d6f22d2098424..6742070ca676f57694a9a6cb11364941deb520a0 100644 (file)
@@ -1008,6 +1008,20 @@ static struct phy_driver ksphy_driver[] = {
        .get_stats      = kszphy_get_stats,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
+}, {
+       .phy_id         = PHY_ID_KSZ8795,
+       .phy_id_mask    = MICREL_PHY_ID_MASK,
+       .name           = "Micrel KSZ8795",
+       .features       = PHY_BASIC_FEATURES,
+       .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+       .config_init    = kszphy_config_init,
+       .config_aneg    = ksz8873mll_config_aneg,
+       .read_status    = ksz8873mll_read_status,
+       .get_sset_count = kszphy_get_sset_count,
+       .get_strings    = kszphy_get_strings,
+       .get_stats      = kszphy_get_stats,
+       .suspend        = genphy_suspend,
+       .resume         = genphy_resume,
 } };
 
 module_phy_driver(ksphy_driver);
index 48da6e93c3f783e07f61ae24151e3114ac8dc1ae..7cc1b7dcfe058fe48cb7209499caa1073ad2c1e9 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/mii.h>
 #include <linux/ethtool.h>
 #include <linux/phy.h>
+#include <linux/phy_led_triggers.h>
 #include <linux/timer.h>
 #include <linux/workqueue.h>
 #include <linux/mdio.h>
@@ -649,14 +650,18 @@ void phy_start_machine(struct phy_device *phydev)
  * phy_trigger_machine - trigger the state machine to run
  *
  * @phydev: the phy_device struct
+ * @sync: indicate whether we should wait for the workqueue cancelation
  *
  * Description: There has been a change in state which requires that the
  *   state machine runs.
  */
 
-static void phy_trigger_machine(struct phy_device *phydev)
+static void phy_trigger_machine(struct phy_device *phydev, bool sync)
 {
-       cancel_delayed_work_sync(&phydev->state_queue);
+       if (sync)
+               cancel_delayed_work_sync(&phydev->state_queue);
+       else
+               cancel_delayed_work(&phydev->state_queue);
        queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
 }
 
@@ -693,7 +698,7 @@ static void phy_error(struct phy_device *phydev)
        phydev->state = PHY_HALTED;
        mutex_unlock(&phydev->lock);
 
-       phy_trigger_machine(phydev);
+       phy_trigger_machine(phydev, false);
 }
 
 /**
@@ -840,7 +845,7 @@ void phy_change(struct phy_device *phydev)
        }
 
        /* reschedule state queue work to run as soon as possible */
-       phy_trigger_machine(phydev);
+       phy_trigger_machine(phydev, true);
        return;
 
 ignore:
@@ -942,7 +947,7 @@ void phy_start(struct phy_device *phydev)
        if (do_resume)
                phy_resume(phydev);
 
-       phy_trigger_machine(phydev);
+       phy_trigger_machine(phydev, true);
 }
 EXPORT_SYMBOL(phy_start);
 
index 92b08383cafa8b88e8d5b79ea3a5c0da9998770f..8c8e15b8739dec0ae96d72b514045a99eb1ed7e1 100644 (file)
@@ -908,6 +908,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
        struct module *ndev_owner = dev->dev.parent->driver->owner;
        struct mii_bus *bus = phydev->mdio.bus;
        struct device *d = &phydev->mdio.dev;
+       bool using_genphy = false;
        int err;
 
        /* For Ethernet device drivers that register their own MDIO bus, we
@@ -933,12 +934,22 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
                        d->driver =
                                &genphy_driver[GENPHY_DRV_1G].mdiodrv.driver;
 
+               using_genphy = true;
+       }
+
+       if (!try_module_get(d->driver->owner)) {
+               dev_err(&dev->dev, "failed to get the device driver module\n");
+               err = -EIO;
+               goto error_put_device;
+       }
+
+       if (using_genphy) {
                err = d->driver->probe(d);
                if (err >= 0)
                        err = device_bind_driver(d);
 
                if (err)
-                       goto error;
+                       goto error_module_put;
        }
 
        if (phydev->attached_dev) {
@@ -975,7 +986,13 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
        return err;
 
 error:
+       /* phy_detach() does all of the cleanup below */
        phy_detach(phydev);
+       return err;
+
+error_module_put:
+       module_put(d->driver->owner);
+error_put_device:
        put_device(d);
        if (ndev_owner != bus->owner)
                module_put(bus->owner);
@@ -1039,6 +1056,8 @@ void phy_detach(struct phy_device *phydev)
 
        phy_led_triggers_unregister(phydev);
 
+       module_put(phydev->mdio.dev.driver->owner);
+
        /* If the device had no specific driver before (i.e. - it
         * was using the generic driver), we unbind the device
         * from the generic driver so that there's a chance a
index fa62bdf2f52694dece215d2b62e548026e482c38..94ca42e630bbead0c4fcae0b2ef6c8b19296bb69 100644 (file)
@@ -12,6 +12,7 @@
  */
 #include <linux/leds.h>
 #include <linux/phy.h>
+#include <linux/phy_led_triggers.h>
 #include <linux/netdevice.h>
 
 static struct phy_led_trigger *phy_speed_to_led_trigger(struct phy_device *phy,
@@ -102,8 +103,10 @@ int phy_led_triggers_register(struct phy_device *phy)
                                            sizeof(struct phy_led_trigger) *
                                                   phy->phy_num_led_triggers,
                                            GFP_KERNEL);
-       if (!phy->phy_led_triggers)
-               return -ENOMEM;
+       if (!phy->phy_led_triggers) {
+               err = -ENOMEM;
+               goto out_clear;
+       }
 
        for (i = 0; i < phy->phy_num_led_triggers; i++) {
                err = phy_led_trigger_register(phy, &phy->phy_led_triggers[i],
@@ -120,6 +123,8 @@ out_unreg:
        while (i--)
                phy_led_trigger_unregister(&phy->phy_led_triggers[i]);
        devm_kfree(&phy->mdio.dev, phy->phy_led_triggers);
+out_clear:
+       phy->phy_num_led_triggers = 0;
        return err;
 }
 EXPORT_SYMBOL_GPL(phy_led_triggers_register);
index cd8e02c94be0e514f226b8109e59e5ec29dad7de..bfabe180053e414dee777e0e56b24eceef05c918 100644 (file)
@@ -1170,9 +1170,11 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        }
 
        if (tun->flags & IFF_VNET_HDR) {
-               if (len < tun->vnet_hdr_sz)
+               int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
+
+               if (len < vnet_hdr_sz)
                        return -EINVAL;
-               len -= tun->vnet_hdr_sz;
+               len -= vnet_hdr_sz;
 
                if (!copy_from_iter_full(&gso, sizeof(gso), from))
                        return -EFAULT;
@@ -1183,7 +1185,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 
                if (tun16_to_cpu(tun, gso.hdr_len) > len)
                        return -EINVAL;
-               iov_iter_advance(from, tun->vnet_hdr_sz - sizeof(gso));
+               iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
        }
 
        if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
@@ -1335,7 +1337,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
                vlan_hlen = VLAN_HLEN;
 
        if (tun->flags & IFF_VNET_HDR)
-               vnet_hdr_sz = tun->vnet_hdr_sz;
+               vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
 
        total = skb->len + vlan_hlen + vnet_hdr_sz;
 
@@ -1360,7 +1362,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
                        return -EINVAL;
 
                if (virtio_net_hdr_from_skb(skb, &gso,
-                                           tun_is_little_endian(tun))) {
+                                           tun_is_little_endian(tun), true)) {
                        struct skb_shared_info *sinfo = skb_shinfo(skb);
                        pr_err("unexpected GSO type: "
                               "0x%x, gso_size %d, hdr_len %d\n",
index 3daa41bdd4eae0e5d44451458cb456175f8aedb2..0acc9b640419a2e94bc9a2d3d43a5a2a65800c8b 100644 (file)
@@ -776,7 +776,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
        struct net_device *netdev;
        struct catc *catc;
        u8 broadcast[ETH_ALEN];
-       int i, pktsz;
+       int pktsz, ret;
 
        if (usb_set_interface(usbdev,
                        intf->altsetting->desc.bInterfaceNumber, 1)) {
@@ -811,12 +811,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
        if ((!catc->ctrl_urb) || (!catc->tx_urb) || 
            (!catc->rx_urb) || (!catc->irq_urb)) {
                dev_err(&intf->dev, "No free urbs available.\n");
-               usb_free_urb(catc->ctrl_urb);
-               usb_free_urb(catc->tx_urb);
-               usb_free_urb(catc->rx_urb);
-               usb_free_urb(catc->irq_urb);
-               free_netdev(netdev);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto fail_free;
        }
 
        /* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */
@@ -844,15 +840,24 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
                 catc->irq_buf, 2, catc_irq_done, catc, 1);
 
        if (!catc->is_f5u011) {
+               u32 *buf;
+               int i;
+
                dev_dbg(dev, "Checking memory size\n");
 
-               i = 0x12345678;
-               catc_write_mem(catc, 0x7a80, &i, 4);
-               i = 0x87654321; 
-               catc_write_mem(catc, 0xfa80, &i, 4);
-               catc_read_mem(catc, 0x7a80, &i, 4);
+               buf = kmalloc(4, GFP_KERNEL);
+               if (!buf) {
+                       ret = -ENOMEM;
+                       goto fail_free;
+               }
+
+               *buf = 0x12345678;
+               catc_write_mem(catc, 0x7a80, buf, 4);
+               *buf = 0x87654321;
+               catc_write_mem(catc, 0xfa80, buf, 4);
+               catc_read_mem(catc, 0x7a80, buf, 4);
          
-               switch (i) {
+               switch (*buf) {
                case 0x12345678:
                        catc_set_reg(catc, TxBufCount, 8);
                        catc_set_reg(catc, RxBufCount, 32);
@@ -867,6 +872,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
                        dev_dbg(dev, "32k Memory\n");
                        break;
                }
+
+               kfree(buf);
          
                dev_dbg(dev, "Getting MAC from SEEROM.\n");
          
@@ -913,16 +920,21 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
        usb_set_intfdata(intf, catc);
 
        SET_NETDEV_DEV(netdev, &intf->dev);
-       if (register_netdev(netdev) != 0) {
-               usb_set_intfdata(intf, NULL);
-               usb_free_urb(catc->ctrl_urb);
-               usb_free_urb(catc->tx_urb);
-               usb_free_urb(catc->rx_urb);
-               usb_free_urb(catc->irq_urb);
-               free_netdev(netdev);
-               return -EIO;
-       }
+       ret = register_netdev(netdev);
+       if (ret)
+               goto fail_clear_intfdata;
+
        return 0;
+
+fail_clear_intfdata:
+       usb_set_intfdata(intf, NULL);
+fail_free:
+       usb_free_urb(catc->ctrl_urb);
+       usb_free_urb(catc->tx_urb);
+       usb_free_urb(catc->rx_urb);
+       usb_free_urb(catc->irq_urb);
+       free_netdev(netdev);
+       return ret;
 }
 
 static void catc_disconnect(struct usb_interface *intf)
index fe7b2886cb6b8fea8935c739d7d50999ed414fcb..86144f9a80ee84a5d4914dace38fdd8ef2932672 100644 (file)
@@ -531,6 +531,7 @@ static const struct driver_info wwan_info = {
 #define SAMSUNG_VENDOR_ID      0x04e8
 #define LENOVO_VENDOR_ID       0x17ef
 #define NVIDIA_VENDOR_ID       0x0955
+#define HP_VENDOR_ID           0x03f0
 
 static const struct usb_device_id      products[] = {
 /* BLACKLIST !!
@@ -677,6 +678,13 @@ static const struct usb_device_id  products[] = {
        .driver_info = 0,
 },
 
+/* HP lt2523 (Novatel E371) - handled by qmi_wwan */
+{
+       USB_DEVICE_AND_INTERFACE_INFO(HP_VENDOR_ID, 0x421d, USB_CLASS_COMM,
+                                     USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+       .driver_info = 0,
+},
+
 /* AnyDATA ADU960S - handled by qmi_wwan */
 {
        USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM,
index 24e803fe9a534c2e23dee6418496dd2755ff8de9..36674484c6fb9b73011619824f7bc60c50b9c1ad 100644 (file)
@@ -126,40 +126,61 @@ static void async_ctrl_callback(struct urb *urb)
 
 static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
 {
+       u8 *buf;
        int ret;
 
+       buf = kmalloc(size, GFP_NOIO);
+       if (!buf)
+               return -ENOMEM;
+
        ret = usb_control_msg(pegasus->usb, usb_rcvctrlpipe(pegasus->usb, 0),
                              PEGASUS_REQ_GET_REGS, PEGASUS_REQT_READ, 0,
-                             indx, data, size, 1000);
+                             indx, buf, size, 1000);
        if (ret < 0)
                netif_dbg(pegasus, drv, pegasus->net,
                          "%s returned %d\n", __func__, ret);
+       else if (ret <= size)
+               memcpy(data, buf, ret);
+       kfree(buf);
        return ret;
 }
 
-static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
+static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
+                        const void *data)
 {
+       u8 *buf;
        int ret;
 
+       buf = kmemdup(data, size, GFP_NOIO);
+       if (!buf)
+               return -ENOMEM;
+
        ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
                              PEGASUS_REQ_SET_REGS, PEGASUS_REQT_WRITE, 0,
-                             indx, data, size, 100);
+                             indx, buf, size, 100);
        if (ret < 0)
                netif_dbg(pegasus, drv, pegasus->net,
                          "%s returned %d\n", __func__, ret);
+       kfree(buf);
        return ret;
 }
 
 static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data)
 {
+       u8 *buf;
        int ret;
 
+       buf = kmemdup(&data, 1, GFP_NOIO);
+       if (!buf)
+               return -ENOMEM;
+
        ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
                              PEGASUS_REQ_SET_REG, PEGASUS_REQT_WRITE, data,
-                             indx, &data, 1, 1000);
+                             indx, buf, 1, 1000);
        if (ret < 0)
                netif_dbg(pegasus, drv, pegasus->net,
                          "%s returned %d\n", __func__, ret);
+       kfree(buf);
        return ret;
 }
 
index 6fe1cdb0174f6cf91f8445dbe86a3e977d327255..24d5272cdce51091a26a116205d8ca2ae5be03d9 100644 (file)
@@ -654,6 +654,13 @@ static const struct usb_device_id products[] = {
                                              USB_CDC_PROTO_NONE),
                .driver_info        = (unsigned long)&qmi_wwan_info,
        },
+       {       /* HP lt2523 (Novatel E371) */
+               USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d,
+                                             USB_CLASS_COMM,
+                                             USB_CDC_SUBCLASS_ETHERNET,
+                                             USB_CDC_PROTO_NONE),
+               .driver_info        = (unsigned long)&qmi_wwan_info,
+       },
        {       /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
                USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
                .driver_info = (unsigned long)&qmi_wwan_info,
index f3b48ad90865d036845b34ae0fa326dcb4fea297..ad42295356dd32b70009852247151b4856deea86 100644 (file)
@@ -32,7 +32,7 @@
 #define NETNEXT_VERSION                "08"
 
 /* Information for net */
-#define NET_VERSION            "6"
+#define NET_VERSION            "8"
 
 #define DRIVER_VERSION         "v1." NETNEXT_VERSION "." NET_VERSION
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -1936,6 +1936,9 @@ static int r8152_poll(struct napi_struct *napi, int budget)
                napi_complete(napi);
                if (!list_empty(&tp->rx_done))
                        napi_schedule(napi);
+               else if (!skb_queue_empty(&tp->tx_queue) &&
+                        !list_empty(&tp->tx_free))
+                       napi_schedule(napi);
        }
 
        return work_done;
@@ -3155,10 +3158,13 @@ static void set_carrier(struct r8152 *tp)
                if (!netif_carrier_ok(netdev)) {
                        tp->rtl_ops.enable(tp);
                        set_bit(RTL8152_SET_RX_MODE, &tp->flags);
+                       netif_stop_queue(netdev);
                        napi_disable(&tp->napi);
                        netif_carrier_on(netdev);
                        rtl_start_rx(tp);
                        napi_enable(&tp->napi);
+                       netif_wake_queue(netdev);
+                       netif_info(tp, link, netdev, "carrier on\n");
                }
        } else {
                if (netif_carrier_ok(netdev)) {
@@ -3166,6 +3172,7 @@ static void set_carrier(struct r8152 *tp)
                        napi_disable(&tp->napi);
                        tp->rtl_ops.disable(tp);
                        napi_enable(&tp->napi);
+                       netif_info(tp, link, netdev, "carrier off\n");
                }
        }
 }
@@ -3515,12 +3522,12 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
        if (!netif_running(netdev))
                return 0;
 
+       netif_stop_queue(netdev);
        napi_disable(&tp->napi);
        clear_bit(WORK_ENABLE, &tp->flags);
        usb_kill_urb(tp->intr_urb);
        cancel_delayed_work_sync(&tp->schedule);
        if (netif_carrier_ok(netdev)) {
-               netif_stop_queue(netdev);
                mutex_lock(&tp->control);
                tp->rtl_ops.disable(tp);
                mutex_unlock(&tp->control);
@@ -3545,12 +3552,17 @@ static int rtl8152_post_reset(struct usb_interface *intf)
        if (netif_carrier_ok(netdev)) {
                mutex_lock(&tp->control);
                tp->rtl_ops.enable(tp);
+               rtl_start_rx(tp);
                rtl8152_set_rx_mode(netdev);
                mutex_unlock(&tp->control);
-               netif_wake_queue(netdev);
        }
 
        napi_enable(&tp->napi);
+       netif_wake_queue(netdev);
+       usb_submit_urb(tp->intr_urb, GFP_KERNEL);
+
+       if (!list_empty(&tp->rx_done))
+               napi_schedule(&tp->napi);
 
        return 0;
 }
@@ -3572,6 +3584,8 @@ static bool delay_autosuspend(struct r8152 *tp)
         */
        if (!sw_linking && tp->rtl_ops.in_nway(tp))
                return true;
+       else if (!skb_queue_empty(&tp->tx_queue))
+               return true;
        else
                return false;
 }
@@ -3581,10 +3595,15 @@ static int rtl8152_rumtime_suspend(struct r8152 *tp)
        struct net_device *netdev = tp->netdev;
        int ret = 0;
 
+       set_bit(SELECTIVE_SUSPEND, &tp->flags);
+       smp_mb__after_atomic();
+
        if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
                u32 rcr = 0;
 
                if (delay_autosuspend(tp)) {
+                       clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+                       smp_mb__after_atomic();
                        ret = -EBUSY;
                        goto out1;
                }
@@ -3601,6 +3620,8 @@ static int rtl8152_rumtime_suspend(struct r8152 *tp)
                        if (!(ocp_data & RXFIFO_EMPTY)) {
                                rxdy_gated_en(tp, false);
                                ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
+                               clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+                               smp_mb__after_atomic();
                                ret = -EBUSY;
                                goto out1;
                        }
@@ -3620,8 +3641,6 @@ static int rtl8152_rumtime_suspend(struct r8152 *tp)
                }
        }
 
-       set_bit(SELECTIVE_SUSPEND, &tp->flags);
-
 out1:
        return ret;
 }
@@ -3677,12 +3696,15 @@ static int rtl8152_resume(struct usb_interface *intf)
        if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
                if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
                        tp->rtl_ops.autosuspend_en(tp, false);
-                       clear_bit(SELECTIVE_SUSPEND, &tp->flags);
                        napi_disable(&tp->napi);
                        set_bit(WORK_ENABLE, &tp->flags);
                        if (netif_carrier_ok(tp->netdev))
                                rtl_start_rx(tp);
                        napi_enable(&tp->napi);
+                       clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+                       smp_mb__after_atomic();
+                       if (!list_empty(&tp->rx_done))
+                               napi_schedule(&tp->napi);
                } else {
                        tp->rtl_ops.up(tp);
                        netif_carrier_off(tp->netdev);
index 95b7bd0d7abcac85482da6067626e034a57b56b3..c81c79110cefca9443d614679d8e7cdd4b3295c3 100644 (file)
@@ -155,16 +155,36 @@ static const char driver_name [] = "rtl8150";
 */
 static int get_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
 {
-       return usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
-                              RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
-                              indx, 0, data, size, 500);
+       void *buf;
+       int ret;
+
+       buf = kmalloc(size, GFP_NOIO);
+       if (!buf)
+               return -ENOMEM;
+
+       ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
+                             RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
+                             indx, 0, buf, size, 500);
+       if (ret > 0 && ret <= size)
+               memcpy(data, buf, ret);
+       kfree(buf);
+       return ret;
 }
 
-static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
+static int set_registers(rtl8150_t * dev, u16 indx, u16 size, const void *data)
 {
-       return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
-                              RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE,
-                              indx, 0, data, size, 500);
+       void *buf;
+       int ret;
+
+       buf = kmemdup(data, size, GFP_NOIO);
+       if (!buf)
+               return -ENOMEM;
+
+       ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+                             RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE,
+                             indx, 0, buf, size, 500);
+       kfree(buf);
+       return ret;
 }
 
 static void async_set_reg_cb(struct urb *urb)
index 12071f1582df2c9b4e867e432f972659d3f78dc4..d9440bc022f2c40d965f6a6dd804f7ba74944cbc 100644 (file)
@@ -73,8 +73,6 @@ static        atomic_t iface_counter = ATOMIC_INIT(0);
 /* Private data structure */
 struct sierra_net_data {
 
-       u8 ethr_hdr_tmpl[ETH_HLEN]; /* ethernet header template for rx'd pkts */
-
        u16 link_up;            /* air link up or down */
        u8 tx_hdr_template[4];  /* part of HIP hdr for tx'd packets */
 
@@ -122,6 +120,7 @@ struct param {
 
 /* LSI Protocol types */
 #define SIERRA_NET_PROTOCOL_UMTS      0x01
+#define SIERRA_NET_PROTOCOL_UMTS_DS   0x04
 /* LSI Coverage */
 #define SIERRA_NET_COVERAGE_NONE      0x00
 #define SIERRA_NET_COVERAGE_NOPACKET  0x01
@@ -129,7 +128,8 @@ struct param {
 /* LSI Session */
 #define SIERRA_NET_SESSION_IDLE       0x00
 /* LSI Link types */
-#define SIERRA_NET_AS_LINK_TYPE_IPv4  0x00
+#define SIERRA_NET_AS_LINK_TYPE_IPV4  0x00
+#define SIERRA_NET_AS_LINK_TYPE_IPV6  0x02
 
 struct lsi_umts {
        u8 protocol;
@@ -137,9 +137,14 @@ struct lsi_umts {
        __be16 length;
        /* eventually use a union for the rest - assume umts for now */
        u8 coverage;
-       u8 unused2[41];
+       u8 network_len; /* network name len */
+       u8 network[40]; /* network name (UCS2, bigendian) */
        u8 session_state;
        u8 unused3[33];
+} __packed;
+
+struct lsi_umts_single {
+       struct lsi_umts lsi;
        u8 link_type;
        u8 pdp_addr_len; /* NW-supplied PDP address len */
        u8 pdp_addr[16]; /* NW-supplied PDP address (bigendian)) */
@@ -158,10 +163,31 @@ struct lsi_umts {
        u8 reserved[8];
 } __packed;
 
+struct lsi_umts_dual {
+       struct lsi_umts lsi;
+       u8 pdp_addr4_len; /* NW-supplied PDP IPv4 address len */
+       u8 pdp_addr4[4];  /* NW-supplied PDP IPv4 address (bigendian)) */
+       u8 pdp_addr6_len; /* NW-supplied PDP IPv6 address len */
+       u8 pdp_addr6[16]; /* NW-supplied PDP IPv6 address (bigendian)) */
+       u8 unused4[23];
+       u8 dns1_addr4_len; /* NW-supplied 1st DNS v4 address len (bigendian) */
+       u8 dns1_addr4[4];  /* NW-supplied 1st DNS v4 address */
+       u8 dns1_addr6_len; /* NW-supplied 1st DNS v6 address len */
+       u8 dns1_addr6[16]; /* NW-supplied 1st DNS v6 address (bigendian)*/
+       u8 dns2_addr4_len; /* NW-supplied 2nd DNS v4 address len (bigendian) */
+       u8 dns2_addr4[4];  /* NW-supplied 2nd DNS v4 address */
+       u8 dns2_addr6_len; /* NW-supplied 2nd DNS v6 address len */
+       u8 dns2_addr6[16]; /* NW-supplied 2nd DNS v6 address (bigendian)*/
+       u8 unused5[68];
+} __packed;
+
 #define SIERRA_NET_LSI_COMMON_LEN      4
-#define SIERRA_NET_LSI_UMTS_LEN        (sizeof(struct lsi_umts))
+#define SIERRA_NET_LSI_UMTS_LEN        (sizeof(struct lsi_umts_single))
 #define SIERRA_NET_LSI_UMTS_STATUS_LEN \
        (SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN)
+#define SIERRA_NET_LSI_UMTS_DS_LEN     (sizeof(struct lsi_umts_dual))
+#define SIERRA_NET_LSI_UMTS_DS_STATUS_LEN \
+       (SIERRA_NET_LSI_UMTS_DS_LEN - SIERRA_NET_LSI_COMMON_LEN)
 
 /* Forward definitions */
 static void sierra_sync_timer(unsigned long syncdata);
@@ -190,10 +216,11 @@ static inline void sierra_net_set_private(struct usbnet *dev,
        dev->data[0] = (unsigned long)priv;
 }
 
-/* is packet IPv4 */
+/* is packet IPv4/IPv6 */
 static inline int is_ip(struct sk_buff *skb)
 {
-       return skb->protocol == cpu_to_be16(ETH_P_IP);
+       return skb->protocol == cpu_to_be16(ETH_P_IP) ||
+              skb->protocol == cpu_to_be16(ETH_P_IPV6);
 }
 
 /*
@@ -349,49 +376,54 @@ static inline int sierra_net_is_valid_addrlen(u8 len)
 static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen)
 {
        struct lsi_umts *lsi = (struct lsi_umts *)data;
+       u32 expected_length;
 
-       if (datalen < sizeof(struct lsi_umts)) {
-               netdev_err(dev->net, "%s: Data length %d, exp %Zu\n",
-                               __func__, datalen,
-                               sizeof(struct lsi_umts));
+       if (datalen < sizeof(struct lsi_umts_single)) {
+               netdev_err(dev->net, "%s: Data length %d, exp >= %Zu\n",
+                          __func__, datalen, sizeof(struct lsi_umts_single));
                return -1;
        }
 
-       if (lsi->length != cpu_to_be16(SIERRA_NET_LSI_UMTS_STATUS_LEN)) {
-               netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
-                               __func__, be16_to_cpu(lsi->length),
-                               (u32)SIERRA_NET_LSI_UMTS_STATUS_LEN);
-               return -1;
+       /* Validate the session state */
+       if (lsi->session_state == SIERRA_NET_SESSION_IDLE) {
+               netdev_err(dev->net, "Session idle, 0x%02x\n",
+                          lsi->session_state);
+               return 0;
        }
 
        /* Validate the protocol  - only support UMTS for now */
-       if (lsi->protocol != SIERRA_NET_PROTOCOL_UMTS) {
+       if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS) {
+               struct lsi_umts_single *single = (struct lsi_umts_single *)lsi;
+
+               /* Validate the link type */
+               if (single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV4 &&
+                   single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV6) {
+                       netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
+                                  single->link_type);
+                       return -1;
+               }
+               expected_length = SIERRA_NET_LSI_UMTS_STATUS_LEN;
+       } else if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS_DS) {
+               expected_length = SIERRA_NET_LSI_UMTS_DS_STATUS_LEN;
+       } else {
                netdev_err(dev->net, "Protocol unsupported, 0x%02x\n",
-                       lsi->protocol);
+                          lsi->protocol);
                return -1;
        }
 
-       /* Validate the link type */
-       if (lsi->link_type != SIERRA_NET_AS_LINK_TYPE_IPv4) {
-               netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
-                       lsi->link_type);
+       if (be16_to_cpu(lsi->length) != expected_length) {
+               netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
+                          __func__, be16_to_cpu(lsi->length), expected_length);
                return -1;
        }
 
        /* Validate the coverage */
-       if (lsi->coverage == SIERRA_NET_COVERAGE_NONE
-          || lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
+       if (lsi->coverage == SIERRA_NET_COVERAGE_NONE ||
+           lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
                netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage);
                return 0;
        }
 
-       /* Validate the session state */
-       if (lsi->session_state == SIERRA_NET_SESSION_IDLE) {
-               netdev_err(dev->net, "Session idle, 0x%02x\n",
-                       lsi->session_state);
-               return 0;
-       }
-
        /* Set link_sense true */
        return 1;
 }
@@ -652,7 +684,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
        u8      numendpoints;
        u16     fwattr = 0;
        int     status;
-       struct ethhdr *eth;
        struct sierra_net_data *priv;
        static const u8 sync_tmplate[sizeof(priv->sync_msg)] = {
                0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00};
@@ -690,11 +721,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
        dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
        dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
 
-       /* we will have to manufacture ethernet headers, prepare template */
-       eth = (struct ethhdr *)priv->ethr_hdr_tmpl;
-       memcpy(&eth->h_dest, dev->net->dev_addr, ETH_ALEN);
-       eth->h_proto = cpu_to_be16(ETH_P_IP);
-
        /* prepare shutdown message template */
        memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg));
        /* set context index initially to 0 - prepares tx hdr template */
@@ -824,9 +850,14 @@ static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 
                skb_pull(skb, hh.hdrlen);
 
-               /* We are going to accept this packet, prepare it */
-               memcpy(skb->data, sierra_net_get_private(dev)->ethr_hdr_tmpl,
-                       ETH_HLEN);
+               /* We are going to accept this packet, prepare it.
+                * In case protocol is IPv6, keep it, otherwise force IPv4.
+                */
+               skb_reset_mac_header(skb);
+               if (eth_hdr(skb)->h_proto != cpu_to_be16(ETH_P_IPV6))
+                       eth_hdr(skb)->h_proto = cpu_to_be16(ETH_P_IP);
+               eth_zero_addr(eth_hdr(skb)->h_source);
+               memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
 
                /* Last packet in batch handled by usbnet */
                if (hh.payload_len.word == skb->len)
index 4a105006ca637bc985698fa378ff6d14e1494b3f..765c2d6358daf38203cdb1a50a31cc04f65c1968 100644 (file)
@@ -48,8 +48,16 @@ module_param(gso, bool, 0444);
  */
 DECLARE_EWMA(pkt_len, 1, 64)
 
+/* With mergeable buffers we align buffer address and use the low bits to
+ * encode its true size. Buffer size is up to 1 page so we need to align to
+ * square root of page size to ensure we reserve enough bits to encode the true
+ * size.
+ */
+#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)
+
 /* Minimum alignment for mergeable packet buffers. */
-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \
+                                  1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT)
 
 #define VIRTNET_DRIVER_VERSION "1.0.0"
 
@@ -1104,7 +1112,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
                hdr = skb_vnet_hdr(skb);
 
        if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
-                                   virtio_is_little_endian(vi->vdev)))
+                                   virtio_is_little_endian(vi->vdev), false))
                BUG();
 
        if (vi->mergeable_rx_bufs)
@@ -1707,6 +1715,11 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
        u16 xdp_qp = 0, curr_qp;
        int i, err;
 
+       if (prog && prog->xdp_adjust_head) {
+               netdev_warn(dev, "Does not support bpf_xdp_adjust_head()\n");
+               return -EOPNOTSUPP;
+       }
+
        if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
            virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
            virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
@@ -1890,8 +1903,12 @@ static void free_receive_page_frags(struct virtnet_info *vi)
                        put_page(vi->rq[i].alloc_frag.page);
 }
 
-static bool is_xdp_queue(struct virtnet_info *vi, int q)
+static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
 {
+       /* For small receive mode always use kfree_skb variants */
+       if (!vi->mergeable_rx_bufs)
+               return false;
+
        if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
                return false;
        else if (q < vi->curr_queue_pairs)
@@ -1908,7 +1925,7 @@ static void free_unused_bufs(struct virtnet_info *vi)
        for (i = 0; i < vi->max_queue_pairs; i++) {
                struct virtqueue *vq = vi->sq[i].vq;
                while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
-                       if (!is_xdp_queue(vi, i))
+                       if (!is_xdp_raw_buffer_queue(vi, i))
                                dev_kfree_skb(buf);
                        else
                                put_page(virt_to_head_page(buf));
index ca7196c400609b8cfe6fe9da5efae19770258de8..30b04cf2bb1e08f89ac93c086b17bf8d59df6b37 100644 (file)
@@ -2268,7 +2268,7 @@ static void vxlan_cleanup(unsigned long arg)
                                = container_of(p, struct vxlan_fdb, hlist);
                        unsigned long timeout;
 
-                       if (f->state & NUD_PERMANENT)
+                       if (f->state & (NUD_PERMANENT | NUD_NOARP))
                                continue;
 
                        timeout = f->used + vxlan->cfg.age_interval * HZ;
@@ -2354,7 +2354,7 @@ static int vxlan_open(struct net_device *dev)
 }
 
 /* Purge the forwarding table */
-static void vxlan_flush(struct vxlan_dev *vxlan)
+static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
 {
        unsigned int h;
 
@@ -2364,6 +2364,8 @@ static void vxlan_flush(struct vxlan_dev *vxlan)
                hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
                        struct vxlan_fdb *f
                                = container_of(p, struct vxlan_fdb, hlist);
+                       if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP)))
+                               continue;
                        /* the all_zeros_mac entry is deleted at vxlan_uninit */
                        if (!is_zero_ether_addr(f->eth_addr))
                                vxlan_fdb_destroy(vxlan, f);
@@ -2385,7 +2387,7 @@ static int vxlan_stop(struct net_device *dev)
 
        del_timer_sync(&vxlan->age_timer);
 
-       vxlan_flush(vxlan);
+       vxlan_flush(vxlan, false);
        vxlan_sock_release(vxlan);
 
        return ret;
@@ -2437,7 +2439,8 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
 
                rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos,
                                     info->key.u.ipv4.dst,
-                                    &info->key.u.ipv4.src, dport, sport, NULL, info);
+                                    &info->key.u.ipv4.src, dport, sport,
+                                    &info->dst_cache, info);
                if (IS_ERR(rt))
                        return PTR_ERR(rt);
                ip_rt_put(rt);
@@ -2448,7 +2451,8 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
 
                ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos,
                                        info->key.label, &info->key.u.ipv6.dst,
-                                       &info->key.u.ipv6.src, dport, sport, NULL, info);
+                                       &info->key.u.ipv6.src, dport, sport,
+                                       &info->dst_cache, info);
                if (IS_ERR(ndst))
                        return PTR_ERR(ndst);
                dst_release(ndst);
@@ -2890,7 +2894,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
        memcpy(&vxlan->cfg, conf, sizeof(*conf));
        if (!vxlan->cfg.dst_port) {
                if (conf->flags & VXLAN_F_GPE)
-                       vxlan->cfg.dst_port = 4790; /* IANA assigned VXLAN-GPE port */
+                       vxlan->cfg.dst_port = htons(4790); /* IANA VXLAN-GPE port */
                else
                        vxlan->cfg.dst_port = default_port;
        }
@@ -3058,6 +3062,8 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
 
+       vxlan_flush(vxlan, true);
+
        spin_lock(&vn->sock_lock);
        if (!hlist_unhashed(&vxlan->hlist))
                hlist_del_rcu(&vxlan->hlist);
index d02ca1491d16cede66389540f8cb92dda5749ff3..8d3e53fac1dabc01ed875b6f8c2863bb908f770c 100644 (file)
@@ -91,7 +91,7 @@
 
 #define IWL8000_FW_PRE "iwlwifi-8000C-"
 #define IWL8000_MODULE_FIRMWARE(api) \
-       IWL8000_FW_PRE "-" __stringify(api) ".ucode"
+       IWL8000_FW_PRE __stringify(api) ".ucode"
 
 #define IWL8265_FW_PRE "iwlwifi-8265-"
 #define IWL8265_MODULE_FIRMWARE(api) \
index 636c8b03e31892bd30e3a3d7a6b1e9b8a8eb02ea..09e9e2e3ed040202f0cb40c1e326584b0fa7465a 100644 (file)
@@ -1164,9 +1164,10 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
                .frame_limit = IWL_FRAME_LIMIT,
        };
 
-       /* Make sure reserved queue is still marked as such (or allocated) */
-       mvm->queue_info[mvm_sta->reserved_queue].status =
-               IWL_MVM_QUEUE_RESERVED;
+       /* Make sure reserved queue is still marked as such (if allocated) */
+       if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
+               mvm->queue_info[mvm_sta->reserved_queue].status =
+                       IWL_MVM_QUEUE_RESERVED;
 
        for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
                struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
index 63a051be832ed44b30978b4d031464b15765df20..bec7d9c46087d3c8fed48d5858f4f116797eceed 100644 (file)
@@ -843,8 +843,10 @@ static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm)
                return;
 
        IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n");
-       thermal_zone_device_unregister(mvm->tz_device.tzone);
-       mvm->tz_device.tzone = NULL;
+       if (mvm->tz_device.tzone) {
+               thermal_zone_device_unregister(mvm->tz_device.tzone);
+               mvm->tz_device.tzone = NULL;
+       }
 }
 
 static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
@@ -853,8 +855,10 @@ static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
                return;
 
        IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n");
-       thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
-       mvm->cooling_dev.cdev = NULL;
+       if (mvm->cooling_dev.cdev) {
+               thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
+               mvm->cooling_dev.cdev = NULL;
+       }
 }
 #endif /* CONFIG_THERMAL */
 
index 691ddef1ae28eab7d2a193fca3a72baa7871e480..a33a06d58a9ae8496e9baa2cd7a6e2ac1f3087e2 100644 (file)
@@ -92,7 +92,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       char *fw_name = "rtlwifi/rtl8192cfwU.bin";
+       char *fw_name;
 
        rtl8192ce_bt_reg_init(hw);
 
@@ -164,8 +164,13 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
        }
 
        /* request fw */
-       if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
+       if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
+           !IS_92C_SERIAL(rtlhal->version))
+               fw_name = "rtlwifi/rtl8192cfwU.bin";
+       else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
                fw_name = "rtlwifi/rtl8192cfwU_B.bin";
+       else
+               fw_name = "rtlwifi/rtl8192cfw.bin";
 
        rtlpriv->max_fw_size = 0x4000;
        pr_info("Using firmware %s\n", fw_name);
index 3ce1f7da864742a2aa1fe268ffd440b09764ea05..530586be05b4357dc8ee6439b7a9e225bce012c6 100644 (file)
@@ -113,10 +113,10 @@ struct xenvif_stats {
         * A subset of struct net_device_stats that contains only the
         * fields that are updated in netback.c for each queue.
         */
-       unsigned int rx_bytes;
-       unsigned int rx_packets;
-       unsigned int tx_bytes;
-       unsigned int tx_packets;
+       u64 rx_bytes;
+       u64 rx_packets;
+       u64 tx_bytes;
+       u64 tx_packets;
 
        /* Additional stats used by xenvif */
        unsigned long rx_gso_checksum_fixup;
index e30ffd29b7e913f2514c4c925979ffd474190acc..50fa1692d98516acc8257f7480c4d9ddc9f443ea 100644 (file)
@@ -221,18 +221,18 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 {
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
-       unsigned int num_queues = vif->num_queues;
-       unsigned long rx_bytes = 0;
-       unsigned long rx_packets = 0;
-       unsigned long tx_bytes = 0;
-       unsigned long tx_packets = 0;
+       u64 rx_bytes = 0;
+       u64 rx_packets = 0;
+       u64 tx_bytes = 0;
+       u64 tx_packets = 0;
        unsigned int index;
 
+       spin_lock(&vif->lock);
        if (vif->queues == NULL)
                goto out;
 
        /* Aggregate tx and rx stats from each queue */
-       for (index = 0; index < num_queues; ++index) {
+       for (index = 0; index < vif->num_queues; ++index) {
                queue = &vif->queues[index];
                rx_bytes += queue->stats.rx_bytes;
                rx_packets += queue->stats.rx_packets;
@@ -241,6 +241,8 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
        }
 
 out:
+       spin_unlock(&vif->lock);
+
        vif->dev->stats.rx_bytes = rx_bytes;
        vif->dev->stats.rx_packets = rx_packets;
        vif->dev->stats.tx_bytes = tx_bytes;
index 3124eaec942745fe9eb6cd4a49ee8bc633d80e67..85b742e1c42fa75bc771db4e8b91f80f3fe68d75 100644 (file)
@@ -493,11 +493,22 @@ static int backend_create_xenvif(struct backend_info *be)
 static void backend_disconnect(struct backend_info *be)
 {
        if (be->vif) {
+               unsigned int queue_index;
+
                xen_unregister_watchers(be->vif);
 #ifdef CONFIG_DEBUG_FS
                xenvif_debugfs_delif(be->vif);
 #endif /* CONFIG_DEBUG_FS */
                xenvif_disconnect_data(be->vif);
+               for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
+                       xenvif_deinit_queue(&be->vif->queues[queue_index]);
+
+               spin_lock(&be->vif->lock);
+               vfree(be->vif->queues);
+               be->vif->num_queues = 0;
+               be->vif->queues = NULL;
+               spin_unlock(&be->vif->lock);
+
                xenvif_disconnect_ctrl(be->vif);
        }
 }
@@ -1034,6 +1045,8 @@ static void connect(struct backend_info *be)
 err:
        if (be->vif->num_queues > 0)
                xenvif_disconnect_data(be->vif); /* Clean up existing queues */
+       for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
+               xenvif_deinit_queue(&be->vif->queues[queue_index]);
        vfree(be->vif->queues);
        be->vif->queues = NULL;
        be->vif->num_queues = 0;
index a479cd99911d8a06cdc37b92682b58b101500846..1e4125a98291245f5e806a79247a92ca1418092b 100644 (file)
@@ -281,6 +281,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 {
        RING_IDX req_prod = queue->rx.req_prod_pvt;
        int notify;
+       int err = 0;
 
        if (unlikely(!netif_carrier_ok(queue->info->netdev)))
                return;
@@ -295,8 +296,10 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
                struct xen_netif_rx_request *req;
 
                skb = xennet_alloc_one_rx_buffer(queue);
-               if (!skb)
+               if (!skb) {
+                       err = -ENOMEM;
                        break;
+               }
 
                id = xennet_rxidx(req_prod);
 
@@ -320,8 +323,13 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 
        queue->rx.req_prod_pvt = req_prod;
 
-       /* Not enough requests? Try again later. */
-       if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) {
+       /* Try again later if there are not enough requests or skb allocation
+        * failed.
+        * Enough requests is quantified as the sum of newly created slots and
+        * the unconsumed slots at the backend.
+        */
+       if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
+           unlikely(err)) {
                mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
                return;
        }
@@ -1379,6 +1387,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
        for (i = 0; i < num_queues && info->queues; ++i) {
                struct netfront_queue *queue = &info->queues[i];
 
+               del_timer_sync(&queue->rx_refill_timer);
+
                if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
                        unbind_from_irqhandler(queue->tx_irq, queue);
                if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
@@ -1733,7 +1743,6 @@ static void xennet_destroy_queues(struct netfront_info *info)
 
                if (netif_running(info->netdev))
                        napi_disable(&queue->napi);
-               del_timer_sync(&queue->rx_refill_timer);
                netif_napi_del(&queue->napi);
        }
 
@@ -1822,27 +1831,19 @@ static int talk_to_netback(struct xenbus_device *dev,
                xennet_destroy_queues(info);
 
        err = xennet_create_queues(info, &num_queues);
-       if (err < 0)
-               goto destroy_ring;
+       if (err < 0) {
+               xenbus_dev_fatal(dev, err, "creating queues");
+               kfree(info->queues);
+               info->queues = NULL;
+               goto out;
+       }
 
        /* Create shared ring, alloc event channel -- for each queue */
        for (i = 0; i < num_queues; ++i) {
                queue = &info->queues[i];
                err = setup_netfront(dev, queue, feature_split_evtchn);
-               if (err) {
-                       /* setup_netfront() will tidy up the current
-                        * queue on error, but we need to clean up
-                        * those already allocated.
-                        */
-                       if (i > 0) {
-                               rtnl_lock();
-                               netif_set_real_num_tx_queues(info->netdev, i);
-                               rtnl_unlock();
-                               goto destroy_ring;
-                       } else {
-                               goto out;
-                       }
-               }
+               if (err)
+                       goto destroy_ring;
        }
 
 again:
@@ -1932,9 +1933,10 @@ abort_transaction_no_dev_fatal:
        xenbus_transaction_end(xbt, 1);
  destroy_ring:
        xennet_disconnect_backend(info);
-       kfree(info->queues);
-       info->queues = NULL;
+       xennet_destroy_queues(info);
  out:
+       unregister_netdev(info->netdev);
+       xennet_free_netdev(info->netdev);
        return err;
 }
 
index eca9688bf9d9fdff11ea091b9b2e3d9c115ab8e0..c00238491673766e05bc5bd2d2d3bec4aac30484 100644 (file)
@@ -1629,6 +1629,28 @@ static void atom_deinit_dev(struct intel_ntb_dev *ndev)
 
 /* Skylake Xeon NTB */
 
+static int skx_poll_link(struct intel_ntb_dev *ndev)
+{
+       u16 reg_val;
+       int rc;
+
+       ndev->reg->db_iowrite(ndev->db_link_mask,
+                             ndev->self_mmio +
+                             ndev->self_reg->db_clear);
+
+       rc = pci_read_config_word(ndev->ntb.pdev,
+                                 SKX_LINK_STATUS_OFFSET, &reg_val);
+       if (rc)
+               return 0;
+
+       if (reg_val == ndev->lnk_sta)
+               return 0;
+
+       ndev->lnk_sta = reg_val;
+
+       return 1;
+}
+
 static u64 skx_db_ioread(void __iomem *mmio)
 {
        return ioread64(mmio);
@@ -2852,7 +2874,7 @@ static struct intel_b2b_addr xeon_b2b_dsd_addr = {
 };
 
 static const struct intel_ntb_reg skx_reg = {
-       .poll_link              = xeon_poll_link,
+       .poll_link              = skx_poll_link,
        .link_is_up             = xeon_link_is_up,
        .db_ioread              = skx_db_ioread,
        .db_iowrite             = skx_db_iowrite,
index f81aa4b18d9f4dd76ec0be63f46865d3b04f89c6..02ca45fdd89203f31246f552811b1264059232d6 100644 (file)
@@ -1802,7 +1802,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
 
        node = dev_to_node(&ndev->dev);
 
-       free_queue = ffs(nt->qp_bitmap);
+       free_queue = ffs(nt->qp_bitmap_free);
        if (!free_queue)
                goto err;
 
@@ -2273,9 +2273,8 @@ module_init(ntb_transport_init);
 
 static void __exit ntb_transport_exit(void)
 {
-       debugfs_remove_recursive(nt_debugfs_dir);
-
        ntb_unregister_client(&ntb_transport_client);
        bus_unregister(&ntb_transport_bus);
+       debugfs_remove_recursive(nt_debugfs_dir);
 }
 module_exit(ntb_transport_exit);
index e75d4fdc08663905eace859cff6cdebdb97e92b5..434e1d474f3340e1d35b48c924a6bebfbfb0fa67 100644 (file)
@@ -265,6 +265,8 @@ static ssize_t perf_copy(struct pthr_ctx *pctx, char __iomem *dst,
        if (dma_submit_error(cookie))
                goto err_set_unmap;
 
+       dmaengine_unmap_put(unmap);
+
        atomic_inc(&pctx->dma_sync);
        dma_async_issue_pending(chan);
 
index a518cb1b59d4238b675fccd695f45003af380296..ce3e8dfa10ad5ccc5285621e5c5b27b5c557e16b 100644 (file)
@@ -52,17 +52,17 @@ static void namespace_blk_release(struct device *dev)
        kfree(nsblk);
 }
 
-static struct device_type namespace_io_device_type = {
+static const struct device_type namespace_io_device_type = {
        .name = "nd_namespace_io",
        .release = namespace_io_release,
 };
 
-static struct device_type namespace_pmem_device_type = {
+static const struct device_type namespace_pmem_device_type = {
        .name = "nd_namespace_pmem",
        .release = namespace_pmem_release,
 };
 
-static struct device_type namespace_blk_device_type = {
+static const struct device_type namespace_blk_device_type = {
        .name = "nd_namespace_blk",
        .release = namespace_blk_release,
 };
@@ -962,8 +962,8 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
        struct nvdimm_drvdata *ndd;
        struct nd_label_id label_id;
        u32 flags = 0, remainder;
+       int rc, i, id = -1;
        u8 *uuid = NULL;
-       int rc, i;
 
        if (dev->driver || ndns->claim)
                return -EBUSY;
@@ -972,11 +972,13 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
 
                uuid = nspm->uuid;
+               id = nspm->id;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
 
                uuid = nsblk->uuid;
                flags = NSLABEL_FLAG_LOCAL;
+               id = nsblk->id;
        }
 
        /*
@@ -1039,10 +1041,11 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
 
        /*
         * Try to delete the namespace if we deleted all of its
-        * allocation, this is not the seed device for the region, and
-        * it is not actively claimed by a btt instance.
+        * allocation, this is not the seed or 0th device for the
+        * region, and it is not actively claimed by a btt, pfn, or dax
+        * instance.
         */
-       if (val == 0 && nd_region->ns_seed != dev && !ndns->claim)
+       if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim)
                nd_device_unregister(dev, ND_ASYNC);
 
        return rc;
index a2ac9e641aa9341f2fcca9fa8b968fd874e80a90..6c033c9a2f06921feb49e3ba396d6cf150e19801 100644 (file)
@@ -627,15 +627,12 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
        size = resource_size(&nsio->res);
        npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K;
        if (nd_pfn->mode == PFN_MODE_PMEM) {
-               unsigned long memmap_size;
-
                /*
                 * vmemmap_populate_hugepages() allocates the memmap array in
                 * HPAGE_SIZE chunks.
                 */
-               memmap_size = ALIGN(64 * npfns, HPAGE_SIZE);
-               offset = ALIGN(start + SZ_8K + memmap_size + dax_label_reserve,
-                               nd_pfn->align) - start;
+               offset = ALIGN(start + SZ_8K + 64 * npfns + dax_label_reserve,
+                               max(nd_pfn->align, HPAGE_SIZE)) - start;
        } else if (nd_pfn->mode == PFN_MODE_RAM)
                offset = ALIGN(start + SZ_8K + dax_label_reserve,
                                nd_pfn->align) - start;
index fcc9dcfdf67517d1352bef388a50d2dbf6c9a129..e65041c640cbc5bad3c284b77690605f071edfc3 100644 (file)
@@ -1663,13 +1663,13 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
                return 0;
 
        freq->sg_table.sgl = freq->first_sgl;
-       ret = sg_alloc_table_chained(&freq->sg_table, rq->nr_phys_segments,
-                       freq->sg_table.sgl);
+       ret = sg_alloc_table_chained(&freq->sg_table,
+                       blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
        if (ret)
                return -ENOMEM;
 
        op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
-       WARN_ON(op->nents > rq->nr_phys_segments);
+       WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
        dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
        freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
                                op->nents, dir);
index 6f5074153dcd9ea921f35751bbd622c18f227e49..be8c800078e2a6cbffa43208aa0326994d8e4bf5 100644 (file)
@@ -631,6 +631,7 @@ static void nvmet_subsys_release(struct config_item *item)
 {
        struct nvmet_subsys *subsys = to_subsys(item);
 
+       nvmet_subsys_del_ctrls(subsys);
        nvmet_subsys_put(subsys);
 }
 
index b1d66ed655c9ec36261fb4d31bca06555cccca39..fc5ba2f9e15f47fe8bd13795bdb9d6caaa532b93 100644 (file)
@@ -200,7 +200,7 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
        pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
                ctrl->cntlid, ctrl->kato);
 
-       ctrl->ops->delete_ctrl(ctrl);
+       nvmet_ctrl_fatal_error(ctrl);
 }
 
 static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
@@ -816,6 +816,9 @@ static void nvmet_ctrl_free(struct kref *ref)
        list_del(&ctrl->subsys_entry);
        mutex_unlock(&subsys->lock);
 
+       flush_work(&ctrl->async_event_work);
+       cancel_work_sync(&ctrl->fatal_err_work);
+
        ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid);
        nvmet_subsys_put(subsys);
 
@@ -935,6 +938,16 @@ static void nvmet_subsys_free(struct kref *ref)
        kfree(subsys);
 }
 
+void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
+{
+       struct nvmet_ctrl *ctrl;
+
+       mutex_lock(&subsys->lock);
+       list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+               ctrl->ops->delete_ctrl(ctrl);
+       mutex_unlock(&subsys->lock);
+}
+
 void nvmet_subsys_put(struct nvmet_subsys *subsys)
 {
        kref_put(&subsys->ref, nvmet_subsys_free);
index 173e842f19c975a4a849c4120fcfcdcee73c26c1..ba57f9852bde33b0ff3d0655d4c08313632a3a8f 100644 (file)
@@ -1314,7 +1314,7 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
                        (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
        struct fcnvme_ls_disconnect_acc *acc =
                        (struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
-       struct nvmet_fc_tgt_queue *queue;
+       struct nvmet_fc_tgt_queue *queue = NULL;
        struct nvmet_fc_tgt_assoc *assoc;
        int ret = 0;
        bool del_assoc = false;
@@ -1348,7 +1348,18 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
                assoc = nvmet_fc_find_target_assoc(tgtport,
                                be64_to_cpu(rqst->associd.association_id));
                iod->assoc = assoc;
-               if (!assoc)
+               if (assoc) {
+                       if (rqst->discon_cmd.scope ==
+                                       FCNVME_DISCONN_CONNECTION) {
+                               queue = nvmet_fc_find_target_queue(tgtport,
+                                               be64_to_cpu(
+                                                       rqst->discon_cmd.id));
+                               if (!queue) {
+                                       nvmet_fc_tgt_a_put(assoc);
+                                       ret = VERR_NO_CONN;
+                               }
+                       }
+               } else
                        ret = VERR_NO_ASSOC;
        }
 
@@ -1373,21 +1384,18 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
                        FCNVME_LS_DISCONNECT);
 
 
-       if (rqst->discon_cmd.scope == FCNVME_DISCONN_CONNECTION) {
-               queue = nvmet_fc_find_target_queue(tgtport,
-                                       be64_to_cpu(rqst->discon_cmd.id));
-               if (queue) {
-                       int qid = queue->qid;
+       /* are we to delete a Connection ID (queue) */
+       if (queue) {
+               int qid = queue->qid;
 
-                       nvmet_fc_delete_target_queue(queue);
+               nvmet_fc_delete_target_queue(queue);
 
-                       /* release the get taken by find_target_queue */
-                       nvmet_fc_tgt_q_put(queue);
+               /* release the get taken by find_target_queue */
+               nvmet_fc_tgt_q_put(queue);
 
-                       /* tear association down if io queue terminated */
-                       if (!qid)
-                               del_assoc = true;
-               }
+               /* tear association down if io queue terminated */
+               if (!qid)
+                       del_assoc = true;
        }
 
        /* release get taken in nvmet_fc_find_target_assoc */
index 23d5eb1c944f64c485fef8551a41a72151492f2c..cc7ad06b43a78a029dd76fac575c3f6ee57c9e92 100644 (file)
@@ -282,6 +282,7 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
 struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
                enum nvme_subsys_type type);
 void nvmet_subsys_put(struct nvmet_subsys *subsys);
+void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);
 
 struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
 void nvmet_put_namespace(struct nvmet_ns *ns);
index 8c3760a78ac080af522afb5892d471063243bd7b..60990220bd831074bc3c8fdbd044ee3aed37a2db 100644 (file)
@@ -438,6 +438,10 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
 {
        struct ib_recv_wr *bad_wr;
 
+       ib_dma_sync_single_for_device(ndev->device,
+               cmd->sge[0].addr, cmd->sge[0].length,
+               DMA_FROM_DEVICE);
+
        if (ndev->srq)
                return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
        return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
@@ -538,6 +542,11 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req)
                first_wr = &rsp->send_wr;
 
        nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
+
+       ib_dma_sync_single_for_device(rsp->queue->dev->device,
+               rsp->send_sge.addr, rsp->send_sge.length,
+               DMA_TO_DEVICE);
+
        if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
                pr_err("sending cmd response failed\n");
                nvmet_rdma_release_rsp(rsp);
@@ -698,6 +707,14 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
        cmd->n_rdma = 0;
        cmd->req.port = queue->port;
 
+
+       ib_dma_sync_single_for_cpu(queue->dev->device,
+               cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
+               DMA_FROM_DEVICE);
+       ib_dma_sync_single_for_cpu(queue->dev->device,
+               cmd->send_sge.addr, cmd->send_sge.length,
+               DMA_TO_DEVICE);
+
        if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
                        &queue->nvme_sq, &nvmet_rdma_ops))
                return;
index dd6d4ccb41e4df9e97cd54d5f19337f3c69a6266..3858b87fd0bb7d35656161aa0a4d55855bf689c9 100644 (file)
@@ -293,7 +293,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
                p->irq = PARPORT_IRQ_NONE;
        }
        if (p->irq != PARPORT_IRQ_NONE) {
-               printk(", irq %d", p->irq);
+               pr_cont(", irq %d", p->irq);
 
                if (p->dma == PARPORT_DMA_AUTO) {
                        p->dma = PARPORT_DMA_NONE;
@@ -303,8 +303,8 @@ struct parport *parport_gsc_probe_port(unsigned long base,
                                            is mandatory (see above) */
                p->dma = PARPORT_DMA_NONE;
 
-       printk(" [");
-#define printmode(x) {if(p->modes&PARPORT_MODE_##x){printk("%s%s",f?",":"",#x);f++;}}
+       pr_cont(" [");
+#define printmode(x) {if(p->modes&PARPORT_MODE_##x){pr_cont("%s%s",f?",":"",#x);f++;}}
        {
                int f = 0;
                printmode(PCSPP);
@@ -315,7 +315,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
 //             printmode(DMA);
        }
 #undef printmode
-       printk("]\n");
+       pr_cont("]\n");
 
        if (p->irq != PARPORT_IRQ_NONE) {
                if (request_irq (p->irq, parport_irq_handler,
index 10c9c0ba8ff2394dc8b43d640e5dbd4d53c5e84e..ec0b4c11ccd9dc95cdb738d03ae292744e34127e 100644 (file)
@@ -31,7 +31,6 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/slab.h>
-#include <linux/pm_runtime.h>
 #include <linux/pci.h>
 #include "../pci.h"
 #include "pciehp.h"
@@ -99,7 +98,6 @@ static int board_added(struct slot *p_slot)
        pciehp_green_led_blink(p_slot);
 
        /* Check link training status */
-       pm_runtime_get_sync(&ctrl->pcie->port->dev);
        retval = pciehp_check_link_status(ctrl);
        if (retval) {
                ctrl_err(ctrl, "Failed to check link status\n");
@@ -120,14 +118,12 @@ static int board_added(struct slot *p_slot)
                if (retval != -EEXIST)
                        goto err_exit;
        }
-       pm_runtime_put(&ctrl->pcie->port->dev);
 
        pciehp_green_led_on(p_slot);
        pciehp_set_attention_status(p_slot, 0);
        return 0;
 
 err_exit:
-       pm_runtime_put(&ctrl->pcie->port->dev);
        set_slot_off(ctrl, p_slot);
        return retval;
 }
@@ -141,9 +137,7 @@ static int remove_board(struct slot *p_slot)
        int retval;
        struct controller *ctrl = p_slot->ctrl;
 
-       pm_runtime_get_sync(&ctrl->pcie->port->dev);
        retval = pciehp_unconfigure_device(p_slot);
-       pm_runtime_put(&ctrl->pcie->port->dev);
        if (retval)
                return retval;
 
index 56efaf72d08e589bbaa13b15513534b144c1b174..d2961ef39a3a0f4106304de84287edff830a7d8c 100644 (file)
@@ -155,7 +155,7 @@ static void pnv_php_detach_device_nodes(struct device_node *parent)
                pnv_php_detach_device_nodes(dn);
 
                of_node_put(dn);
-               refcount = atomic_read(&dn->kobj.kref.refcount);
+               refcount = kref_read(&dn->kobj.kref);
                if (refcount != 1)
                        pr_warn("Invalid refcount %d on <%s>\n",
                                refcount, of_node_full_name(dn));
index 50c5003295ca535036b056d7855caf0c96473f13..7f73bacf13ed9ef212ef5c51d0ab301310be3a75 100644 (file)
@@ -1206,6 +1206,16 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
        if (flags & PCI_IRQ_AFFINITY) {
                if (!affd)
                        affd = &msi_default_affd;
+
+               if (affd->pre_vectors + affd->post_vectors > min_vecs)
+                       return -EINVAL;
+
+               /*
+                * If there aren't any vectors left after applying the pre/post
+                * vectors don't bother with assigning affinity.
+                */
+               if (affd->pre_vectors + affd->post_vectors == min_vecs)
+                       affd = NULL;
        } else {
                if (WARN_ON(affd))
                        affd = NULL;
index a881c0d3d2e87e023bd9f68eb35b9d57ac2e3a9f..7904d02ffdb97e5f23d915e9c83edbf599ddf58b 100644 (file)
@@ -2241,10 +2241,13 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge)
                        return false;
 
                /*
-                * Hotplug ports handled by firmware in System Management Mode
+                * Hotplug interrupts cannot be delivered if the link is down,
+                * so parents of a hotplug port must stay awake. In addition,
+                * hotplug ports handled by firmware in System Management Mode
                 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
+                * For simplicity, disallow in general for now.
                 */
-               if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
+               if (bridge->is_hotplug_bridge)
                        return false;
 
                if (pci_bridge_d3_force)
@@ -2276,10 +2279,7 @@ static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
             !pci_pme_capable(dev, PCI_D3cold)) ||
 
            /* If it is a bridge it must be allowed to go to D3. */
-           !pci_power_manageable(dev) ||
-
-           /* Hotplug interrupts cannot be delivered if the link is down. */
-           dev->is_hotplug_bridge)
+           !pci_power_manageable(dev))
 
                *d3cold_ok = false;
 
index 17ac1dce32867051298a5489841de8b636835a68..3dd8bcbb3011babd4ad4271d6f6f64733bd9b3f1 100644 (file)
@@ -532,25 +532,32 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
        link = kzalloc(sizeof(*link), GFP_KERNEL);
        if (!link)
                return NULL;
+
        INIT_LIST_HEAD(&link->sibling);
        INIT_LIST_HEAD(&link->children);
        INIT_LIST_HEAD(&link->link);
        link->pdev = pdev;
-       if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) {
+
+       /*
+        * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
+        * hierarchies.
+        */
+       if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
+           pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE) {
+               link->root = link;
+       } else {
                struct pcie_link_state *parent;
+
                parent = pdev->bus->parent->self->link_state;
                if (!parent) {
                        kfree(link);
                        return NULL;
                }
+
                link->parent = parent;
+               link->root = link->parent->root;
                list_add(&link->link, &parent->children);
        }
-       /* Setup a pointer to the root port link */
-       if (!link->parent)
-               link->root = link;
-       else
-               link->root = link->parent->root;
 
        list_add(&link->sibling, &link_list);
        pdev->link_state = link;
index 717529331dace5f45bb7337b1fb28adb7e81db5c..2dd1c68e6de8e88aa4757a1e6262cff64521db13 100644 (file)
@@ -433,6 +433,17 @@ static int pcie_pme_resume(struct pcie_device *srv)
        return 0;
 }
 
+/**
+ * pcie_pme_remove - Prepare PCIe PME service device for removal.
+ * @srv - PCIe service device to remove.
+ */
+static void pcie_pme_remove(struct pcie_device *srv)
+{
+       pcie_pme_suspend(srv);
+       free_irq(srv->irq, srv);
+       kfree(get_service_data(srv));
+}
+
 static struct pcie_port_service_driver pcie_pme_driver = {
        .name           = "pcie_pme",
        .port_type      = PCI_EXP_TYPE_ROOT_PORT,
@@ -441,6 +452,7 @@ static struct pcie_port_service_driver pcie_pme_driver = {
        .probe          = pcie_pme_probe,
        .suspend        = pcie_pme_suspend,
        .resume         = pcie_pme_resume,
+       .remove         = pcie_pme_remove,
 };
 
 /**
index 429d34c348b9fbddc373e22876ad4f5017788a8d..e42909524deed903e098cc2cade5a21626181f70 100644 (file)
@@ -345,7 +345,7 @@ EXPORT_SYMBOL_GPL(pci_create_slot);
 void pci_destroy_slot(struct pci_slot *slot)
 {
        dev_dbg(&slot->bus->dev, "dev %02x, dec refcount to %d\n",
-               slot->number, atomic_read(&slot->kobj.kref.refcount) - 1);
+               slot->number, kref_read(&slot->kobj.kref) - 1);
 
        mutex_lock(&pci_slot_mutex);
        kobject_put(&slot->kobj);
index 09172043d5890735127b0f79275a98704cdbd2b6..c617ec49e9edeeebb1f33b78fa6b7214fb23207c 100644 (file)
@@ -217,7 +217,7 @@ static const struct berlin_desc_group berlin4ct_soc_pinctrl_groups[] = {
        BERLIN_PINCTRL_GROUP("SCRD0_CRD_PRES", 0xc, 0x3, 0x15,
                        BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO20 */
                        BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* crd pres */
-                       BERLIN_PINCTRL_FUNCTION(0x1, "sd1a")), /* DAT3 */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* DAT3 */
        BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x18,
                        BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */
                        BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */
index 37300634b7d2c853a05f5dcecce6b9d46d187b59..d94aef17348b4b88f3952670886b863b952cfc60 100644 (file)
@@ -731,16 +731,23 @@ static void __iomem *byt_gpio_reg(struct byt_gpio *vg, unsigned int offset,
                                  int reg)
 {
        struct byt_community *comm = byt_get_community(vg, offset);
-       u32 reg_offset = 0;
+       u32 reg_offset;
 
        if (!comm)
                return NULL;
 
        offset -= comm->pin_base;
-       if (reg == BYT_INT_STAT_REG)
+       switch (reg) {
+       case BYT_INT_STAT_REG:
                reg_offset = (offset / 32) * 4;
-       else
+               break;
+       case BYT_DEBOUNCE_REG:
+               reg_offset = 0;
+               break;
+       default:
                reg_offset = comm->pad_map[offset] * 16;
+               break;
+       }
 
        return comm->reg_base + reg_offset + reg;
 }
@@ -1092,6 +1099,7 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
        enum pin_config_param param = pinconf_to_config_param(*config);
        void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
        void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+       void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
        unsigned long flags;
        u32 conf, pull, val, debounce;
        u16 arg = 0;
@@ -1128,7 +1136,7 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
                        return -EINVAL;
 
                raw_spin_lock_irqsave(&vg->lock, flags);
-               debounce = readl(byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG));
+               debounce = readl(db_reg);
                raw_spin_unlock_irqrestore(&vg->lock, flags);
 
                switch (debounce & BYT_DEBOUNCE_PULSE_MASK) {
@@ -1176,6 +1184,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
        unsigned int param, arg;
        void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
        void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+       void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
        unsigned long flags;
        u32 conf, val, debounce;
        int i, ret = 0;
@@ -1238,36 +1247,44 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
 
                        break;
                case PIN_CONFIG_INPUT_DEBOUNCE:
-                       debounce = readl(byt_gpio_reg(vg, offset,
-                                                     BYT_DEBOUNCE_REG));
-                       conf &= ~BYT_DEBOUNCE_PULSE_MASK;
+                       debounce = readl(db_reg);
+                       debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
+
+                       if (arg)
+                               conf |= BYT_DEBOUNCE_EN;
+                       else
+                               conf &= ~BYT_DEBOUNCE_EN;
 
                        switch (arg) {
                        case 375:
-                               conf |= BYT_DEBOUNCE_PULSE_375US;
+                               debounce |= BYT_DEBOUNCE_PULSE_375US;
                                break;
                        case 750:
-                               conf |= BYT_DEBOUNCE_PULSE_750US;
+                               debounce |= BYT_DEBOUNCE_PULSE_750US;
                                break;
                        case 1500:
-                               conf |= BYT_DEBOUNCE_PULSE_1500US;
+                               debounce |= BYT_DEBOUNCE_PULSE_1500US;
                                break;
                        case 3000:
-                               conf |= BYT_DEBOUNCE_PULSE_3MS;
+                               debounce |= BYT_DEBOUNCE_PULSE_3MS;
                                break;
                        case 6000:
-                               conf |= BYT_DEBOUNCE_PULSE_6MS;
+                               debounce |= BYT_DEBOUNCE_PULSE_6MS;
                                break;
                        case 12000:
-                               conf |= BYT_DEBOUNCE_PULSE_12MS;
+                               debounce |= BYT_DEBOUNCE_PULSE_12MS;
                                break;
                        case 24000:
-                               conf |= BYT_DEBOUNCE_PULSE_24MS;
+                               debounce |= BYT_DEBOUNCE_PULSE_24MS;
                                break;
                        default:
-                               ret = -EINVAL;
+                               if (arg)
+                                       ret = -EINVAL;
+                               break;
                        }
 
+                       if (!ret)
+                               writel(debounce, db_reg);
                        break;
                default:
                        ret = -ENOTSUPP;
@@ -1606,7 +1623,9 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
                        continue;
                }
 
+               raw_spin_lock(&vg->lock);
                pending = readl(reg);
+               raw_spin_unlock(&vg->lock);
                for_each_set_bit(pin, &pending, 32) {
                        virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
                        generic_handle_irq(virq);
@@ -1617,6 +1636,8 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
 
 static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
 {
+       struct gpio_chip *gc = &vg->chip;
+       struct device *dev = &vg->pdev->dev;
        void __iomem *reg;
        u32 base, value;
        int i;
@@ -1638,10 +1659,12 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
                }
 
                value = readl(reg);
-               if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i) &&
-                   !(value & BYT_DIRECT_IRQ_EN)) {
+               if (value & BYT_DIRECT_IRQ_EN) {
+                       clear_bit(i, gc->irq_valid_mask);
+                       dev_dbg(dev, "excluding GPIO %d from IRQ domain\n", i);
+               } else if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i)) {
                        byt_gpio_clear_triggering(vg, i);
-                       dev_dbg(&vg->pdev->dev, "disabling GPIO %d\n", i);
+                       dev_dbg(dev, "disabling GPIO %d\n", i);
                }
        }
 
@@ -1680,6 +1703,7 @@ static int byt_gpio_probe(struct byt_gpio *vg)
        gc->can_sleep   = false;
        gc->parent      = &vg->pdev->dev;
        gc->ngpio       = vg->soc_data->npins;
+       gc->irq_need_valid_mask = true;
 
 #ifdef CONFIG_PM_SLEEP
        vg->saved_context = devm_kcalloc(&vg->pdev->dev, gc->ngpio,
index 59cb7a6fc5bef316d042f93da72792edca2ea8d9..901b356b09d71679a2b4a03f7cd57b30a22fa6f4 100644 (file)
@@ -19,7 +19,7 @@
 
 #define BXT_PAD_OWN    0x020
 #define BXT_HOSTSW_OWN 0x080
-#define BXT_PADCFGLOCK 0x090
+#define BXT_PADCFGLOCK 0x060
 #define BXT_GPI_IE     0x110
 
 #define BXT_COMMUNITY(s, e)                            \
index 1e139672f1af9da0fa7ff4af1a919395e2ea6957..6df35dcb29aea68c0ddec6cbd29bb1c9a3abd56c 100644 (file)
@@ -353,6 +353,21 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
        return 0;
 }
 
+static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input)
+{
+       u32 value;
+
+       value = readl(padcfg0);
+       if (input) {
+               value &= ~PADCFG0_GPIORXDIS;
+               value |= PADCFG0_GPIOTXDIS;
+       } else {
+               value &= ~PADCFG0_GPIOTXDIS;
+               value |= PADCFG0_GPIORXDIS;
+       }
+       writel(value, padcfg0);
+}
+
 static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
                                     struct pinctrl_gpio_range *range,
                                     unsigned pin)
@@ -375,11 +390,11 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
        /* Disable SCI/SMI/NMI generation */
        value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
        value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
-       /* Disable TX buffer and enable RX (this will be input) */
-       value &= ~PADCFG0_GPIORXDIS;
-       value |= PADCFG0_GPIOTXDIS;
        writel(value, padcfg0);
 
+       /* Disable TX buffer and enable RX (this will be input) */
+       __intel_gpio_set_direction(padcfg0, true);
+
        raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
        return 0;
@@ -392,18 +407,11 @@ static int intel_gpio_set_direction(struct pinctrl_dev *pctldev,
        struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
        void __iomem *padcfg0;
        unsigned long flags;
-       u32 value;
 
        raw_spin_lock_irqsave(&pctrl->lock, flags);
 
        padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
-
-       value = readl(padcfg0);
-       if (input)
-               value |= PADCFG0_GPIOTXDIS;
-       else
-               value &= ~PADCFG0_GPIOTXDIS;
-       writel(value, padcfg0);
+       __intel_gpio_set_direction(padcfg0, input);
 
        raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
index b21896126f760a5cbae044ab13cba527e9dbfeff..4d4ef42a39b5faaa1969d20a5aeeedffef90074c 100644 (file)
@@ -794,6 +794,9 @@ static int mrfld_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
        unsigned int i;
        int ret;
 
+       if (!mrfld_buf_available(mp, pin))
+               return -ENOTSUPP;
+
        for (i = 0; i < nconfigs; i++) {
                switch (pinconf_to_config_param(configs[i])) {
                case PIN_CONFIG_BIAS_DISABLE:
index c3928aa3fefa9a1d24b0214e877bbac2bc15f67e..e0bca4df2a2f3188da0d559a29013893a5bea528 100644 (file)
@@ -253,9 +253,8 @@ static const unsigned int uart_tx_ao_a_pins[]       = { PIN(GPIOAO_0, 0) };
 static const unsigned int uart_rx_ao_a_pins[]  = { PIN(GPIOAO_1, 0) };
 static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
 static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
-static const unsigned int uart_tx_ao_b_pins[]  = { PIN(GPIOAO_0, 0) };
-static const unsigned int uart_rx_ao_b_pins[]  = { PIN(GPIOAO_1, 0),
-                                                   PIN(GPIOAO_5, 0) };
+static const unsigned int uart_tx_ao_b_pins[]  = { PIN(GPIOAO_4, 0) };
+static const unsigned int uart_rx_ao_b_pins[]  = { PIN(GPIOAO_5, 0) };
 static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) };
 static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) };
 
@@ -498,7 +497,7 @@ static struct meson_pmx_group meson_gxbb_aobus_groups[] = {
        GPIO_GROUP(GPIOAO_13, 0),
 
        /* bank AO */
-       GROUP(uart_tx_ao_b,     0,      26),
+       GROUP(uart_tx_ao_b,     0,      24),
        GROUP(uart_rx_ao_b,     0,      25),
        GROUP(uart_tx_ao_a,     0,      12),
        GROUP(uart_rx_ao_a,     0,      11),
index 25694f7094c714bbf35eee2ae7b51e2b4ce5b1e9..b69743b07a1d591ace36d410583231319234d4f0 100644 (file)
@@ -214,9 +214,8 @@ static const unsigned int uart_tx_ao_a_pins[]       = { PIN(GPIOAO_0, 0) };
 static const unsigned int uart_rx_ao_a_pins[]  = { PIN(GPIOAO_1, 0) };
 static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
 static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
-static const unsigned int uart_tx_ao_b_pins[]  = { PIN(GPIOAO_0, 0) };
-static const unsigned int uart_rx_ao_b_pins[]  = { PIN(GPIOAO_1, 0),
-                                                   PIN(GPIOAO_5, 0) };
+static const unsigned int uart_tx_ao_b_pins[]  = { PIN(GPIOAO_4, 0) };
+static const unsigned int uart_rx_ao_b_pins[]  = { PIN(GPIOAO_5, 0) };
 static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) };
 static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) };
 
@@ -409,7 +408,7 @@ static struct meson_pmx_group meson_gxl_aobus_groups[] = {
        GPIO_GROUP(GPIOAO_9, 0),
 
        /* bank AO */
-       GROUP(uart_tx_ao_b,     0,      26),
+       GROUP(uart_tx_ao_b,     0,      24),
        GROUP(uart_rx_ao_b,     0,      25),
        GROUP(uart_tx_ao_a,     0,      12),
        GROUP(uart_rx_ao_a,     0,      11),
index c9a146948192dba19ca5da1587791c25b315d628..537b52055756645a8f225dd7e96b191d7d841e96 100644 (file)
@@ -202,6 +202,8 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
                        i = 128;
                        pin_num = AMD_GPIO_PINS_BANK2 + i;
                        break;
+               default:
+                       return;
                }
 
                for (; i < pin_num; i++) {
index 0eb51e33cb1be5412ab11d10e7cdb474a2faa061..207a8de4e1ed851cf542aa4af008e8f74102cad3 100644 (file)
@@ -564,8 +564,7 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
                        val = arg / 10 - 1;
                        break;
                case PIN_CONFIG_BIAS_DISABLE:
-                       val = 0;
-                       break;
+                       continue;
                case PIN_CONFIG_BIAS_PULL_UP:
                        if (arg == 0)
                                return -EINVAL;
index aa8bd9794683b715013c82aa9220d11cfb0ea595..96686336e3a396254b9473f01f1776e0297301ce 100644 (file)
@@ -561,7 +561,7 @@ static const int ether_rgmii_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                          0, 0, 0, 0};
 static const unsigned ether_rmii_pins[] = {30, 31, 32, 33, 34, 35, 36, 37, 39,
                                           41, 42, 45};
-static const int ether_rmii_muxvals[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+static const int ether_rmii_muxvals[] = {0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1};
 static const unsigned i2c0_pins[] = {63, 64};
 static const int i2c0_muxvals[] = {0, 0};
 static const unsigned i2c1_pins[] = {65, 66};
index 59aa8e302bc3f9ab9dd26b43e6bf53ef812631fd..49a594855f98f43d151e44f5d07449cba1a5b095 100644 (file)
@@ -816,13 +816,6 @@ config INTEL_SCU_IPC_UTIL
          low level access for debug work and updating the firmware. Say
          N unless you will be doing this on an Intel MID platform.
 
-config GPIO_INTEL_PMIC
-       bool "Intel PMIC GPIO support"
-       depends on INTEL_SCU_IPC && GPIOLIB
-       ---help---
-         Say Y here to support GPIO via the SCU IPC interface
-         on Intel MID platforms.
-
 config INTEL_MID_POWER_BUTTON
        tristate "power button driver for Intel MID platforms"
        depends on INTEL_SCU_IPC && INPUT
index d4111f0f8a78fdbcc7a10ef5e236bdda1cdffc15..b2f52a7690af2239fae006668a7f7bbdfc00cd10 100644 (file)
@@ -50,7 +50,6 @@ obj-$(CONFIG_INTEL_SCU_IPC)   += intel_scu_ipc.o
 obj-$(CONFIG_INTEL_SCU_IPC_UTIL) += intel_scu_ipcutil.o
 obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o
 obj-$(CONFIG_INTEL_IPS)                += intel_ips.o
-obj-$(CONFIG_GPIO_INTEL_PMIC)  += intel_pmic_gpio.o
 obj-$(CONFIG_XO1_RFKILL)       += xo1-rfkill.o
 obj-$(CONFIG_XO15_EBOOK)       += xo15-ebook.o
 obj-$(CONFIG_IBM_RTL)          += ibm_rtl.o
index 410741acb3c92dabe36417800f564a943c5d42ec..f46ece2ce3c4d48086c73b0e2d0c63ee1fe35893 100644 (file)
@@ -813,6 +813,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
                        case 8:
                        case 7:
                        case 6:
+                       case 1:
                                ideapad_input_report(priv, vpc_bit);
                                break;
                        case 5:
index 1fc0de870ff826e8b90956ab557cc83008e1ce68..361770568ad03a6e7a3bc7e6d579ccacae1725b7 100644 (file)
@@ -77,7 +77,7 @@ static int mfld_pb_probe(struct platform_device *pdev)
 
        input_set_capability(input, EV_KEY, KEY_POWER);
 
-       error = request_threaded_irq(irq, NULL, mfld_pb_isr, 0,
+       error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_ONESHOT,
                                     DRIVER_NAME, input);
        if (error) {
                dev_err(&pdev->dev, "Unable to request irq %d for mfld power"
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
deleted file mode 100644 (file)
index 91ae585..0000000
+++ /dev/null
@@ -1,326 +0,0 @@
-/* Moorestown PMIC GPIO (access through IPC) driver
- * Copyright (c) 2008 - 2009, Intel Corporation.
- *
- * Author: Alek Du <alek.du@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-/* Supports:
- * Moorestown platform PMIC chip
- */
-
-#define pr_fmt(fmt) "%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/stddef.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/gpio/driver.h>
-#include <asm/intel_scu_ipc.h>
-#include <linux/device.h>
-#include <linux/intel_pmic_gpio.h>
-#include <linux/platform_device.h>
-
-#define DRIVER_NAME "pmic_gpio"
-
-/* register offset that IPC driver should use
- * 8 GPIO + 8 GPOSW (6 controllable) + 8GPO
- */
-enum pmic_gpio_register {
-       GPIO0           = 0xE0,
-       GPIO7           = 0xE7,
-       GPIOINT         = 0xE8,
-       GPOSWCTL0       = 0xEC,
-       GPOSWCTL5       = 0xF1,
-       GPO             = 0xF4,
-};
-
-/* bits definition for GPIO & GPOSW */
-#define GPIO_DRV 0x01
-#define GPIO_DIR 0x02
-#define GPIO_DIN 0x04
-#define GPIO_DOU 0x08
-#define GPIO_INTCTL 0x30
-#define GPIO_DBC 0xc0
-
-#define GPOSW_DRV 0x01
-#define GPOSW_DOU 0x08
-#define GPOSW_RDRV 0x30
-
-#define GPIO_UPDATE_TYPE       0x80000000
-
-#define NUM_GPIO 24
-
-struct pmic_gpio {
-       struct mutex            buslock;
-       struct gpio_chip        chip;
-       void                    *gpiointr;
-       int                     irq;
-       unsigned                irq_base;
-       unsigned int            update_type;
-       u32                     trigger_type;
-};
-
-static void pmic_program_irqtype(int gpio, int type)
-{
-       if (type & IRQ_TYPE_EDGE_RISING)
-               intel_scu_ipc_update_register(GPIO0 + gpio, 0x20, 0x20);
-       else
-               intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x20);
-
-       if (type & IRQ_TYPE_EDGE_FALLING)
-               intel_scu_ipc_update_register(GPIO0 + gpio, 0x10, 0x10);
-       else
-               intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x10);
-};
-
-static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
-       if (offset >= 8) {
-               pr_err("only pin 0-7 support input\n");
-               return -1;/* we only have 8 GPIO can use as input */
-       }
-       return intel_scu_ipc_update_register(GPIO0 + offset,
-                                                       GPIO_DIR, GPIO_DIR);
-}
-
-static int pmic_gpio_direction_output(struct gpio_chip *chip,
-                       unsigned offset, int value)
-{
-       int rc = 0;
-
-       if (offset < 8)/* it is GPIO */
-               rc = intel_scu_ipc_update_register(GPIO0 + offset,
-                               GPIO_DRV | (value ? GPIO_DOU : 0),
-                               GPIO_DRV | GPIO_DOU | GPIO_DIR);
-       else if (offset < 16)/* it is GPOSW */
-               rc = intel_scu_ipc_update_register(GPOSWCTL0 + offset - 8,
-                               GPOSW_DRV | (value ? GPOSW_DOU : 0),
-                               GPOSW_DRV | GPOSW_DOU | GPOSW_RDRV);
-       else if (offset > 15 && offset < 24)/* it is GPO */
-               rc = intel_scu_ipc_update_register(GPO,
-                               value ? 1 << (offset - 16) : 0,
-                               1 << (offset - 16));
-       else {
-               pr_err("invalid PMIC GPIO pin %d!\n", offset);
-               WARN_ON(1);
-       }
-
-       return rc;
-}
-
-static int pmic_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
-       u8 r;
-       int ret;
-
-       /* we only have 8 GPIO pins we can use as input */
-       if (offset >= 8)
-               return -EOPNOTSUPP;
-       ret = intel_scu_ipc_ioread8(GPIO0 + offset, &r);
-       if (ret < 0)
-               return ret;
-       return r & GPIO_DIN;
-}
-
-static void pmic_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
-       if (offset < 8)/* it is GPIO */
-               intel_scu_ipc_update_register(GPIO0 + offset,
-                       GPIO_DRV | (value ? GPIO_DOU : 0),
-                       GPIO_DRV | GPIO_DOU);
-       else if (offset < 16)/* it is GPOSW */
-               intel_scu_ipc_update_register(GPOSWCTL0 + offset - 8,
-                       GPOSW_DRV | (value ? GPOSW_DOU : 0),
-                       GPOSW_DRV | GPOSW_DOU | GPOSW_RDRV);
-       else if (offset > 15 && offset < 24) /* it is GPO */
-               intel_scu_ipc_update_register(GPO,
-                       value ? 1 << (offset - 16) : 0,
-                       1 << (offset - 16));
-}
-
-/*
- * This is called from genirq with pg->buslock locked and
- * irq_desc->lock held. We can not access the scu bus here, so we
- * store the change and update in the bus_sync_unlock() function below
- */
-static int pmic_irq_type(struct irq_data *data, unsigned type)
-{
-       struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);
-       u32 gpio = data->irq - pg->irq_base;
-
-       if (gpio >= pg->chip.ngpio)
-               return -EINVAL;
-
-       pg->trigger_type = type;
-       pg->update_type = gpio | GPIO_UPDATE_TYPE;
-       return 0;
-}
-
-static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
-       struct pmic_gpio *pg = gpiochip_get_data(chip);
-
-       return pg->irq_base + offset;
-}
-
-static void pmic_bus_lock(struct irq_data *data)
-{
-       struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);
-
-       mutex_lock(&pg->buslock);
-}
-
-static void pmic_bus_sync_unlock(struct irq_data *data)
-{
-       struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);
-
-       if (pg->update_type) {
-               unsigned int gpio = pg->update_type & ~GPIO_UPDATE_TYPE;
-
-               pmic_program_irqtype(gpio, pg->trigger_type);
-               pg->update_type = 0;
-       }
-       mutex_unlock(&pg->buslock);
-}
-
-/* the gpiointr register is read-clear, so just do nothing. */
-static void pmic_irq_unmask(struct irq_data *data) { }
-
-static void pmic_irq_mask(struct irq_data *data) { }
-
-static struct irq_chip pmic_irqchip = {
-       .name                   = "PMIC-GPIO",
-       .irq_mask               = pmic_irq_mask,
-       .irq_unmask             = pmic_irq_unmask,
-       .irq_set_type           = pmic_irq_type,
-       .irq_bus_lock           = pmic_bus_lock,
-       .irq_bus_sync_unlock    = pmic_bus_sync_unlock,
-};
-
-static irqreturn_t pmic_irq_handler(int irq, void *data)
-{
-       struct pmic_gpio *pg = data;
-       u8 intsts = *((u8 *)pg->gpiointr + 4);
-       int gpio;
-       irqreturn_t ret = IRQ_NONE;
-
-       for (gpio = 0; gpio < 8; gpio++) {
-               if (intsts & (1 << gpio)) {
-                       pr_debug("pmic pin %d triggered\n", gpio);
-                       generic_handle_irq(pg->irq_base + gpio);
-                       ret = IRQ_HANDLED;
-               }
-       }
-       return ret;
-}
-
-static int platform_pmic_gpio_probe(struct platform_device *pdev)
-{
-       struct device *dev = &pdev->dev;
-       int irq = platform_get_irq(pdev, 0);
-       struct intel_pmic_gpio_platform_data *pdata = dev->platform_data;
-
-       struct pmic_gpio *pg;
-       int retval;
-       int i;
-
-       if (irq < 0) {
-               dev_dbg(dev, "no IRQ line\n");
-               return -EINVAL;
-       }
-
-       if (!pdata || !pdata->gpio_base || !pdata->irq_base) {
-               dev_dbg(dev, "incorrect or missing platform data\n");
-               return -EINVAL;
-       }
-
-       pg = kzalloc(sizeof(*pg), GFP_KERNEL);
-       if (!pg)
-               return -ENOMEM;
-
-       dev_set_drvdata(dev, pg);
-
-       pg->irq = irq;
-       /* setting up SRAM mapping for GPIOINT register */
-       pg->gpiointr = ioremap_nocache(pdata->gpiointr, 8);
-       if (!pg->gpiointr) {
-               pr_err("Can not map GPIOINT\n");
-               retval = -EINVAL;
-               goto err2;
-       }
-       pg->irq_base = pdata->irq_base;
-       pg->chip.label = "intel_pmic";
-       pg->chip.direction_input = pmic_gpio_direction_input;
-       pg->chip.direction_output = pmic_gpio_direction_output;
-       pg->chip.get = pmic_gpio_get;
-       pg->chip.set = pmic_gpio_set;
-       pg->chip.to_irq = pmic_gpio_to_irq;
-       pg->chip.base = pdata->gpio_base;
-       pg->chip.ngpio = NUM_GPIO;
-       pg->chip.can_sleep = 1;
-       pg->chip.parent = dev;
-
-       mutex_init(&pg->buslock);
-
-       pg->chip.parent = dev;
-       retval = gpiochip_add_data(&pg->chip, pg);
-       if (retval) {
-               pr_err("Can not add pmic gpio chip\n");
-               goto err;
-       }
-
-       retval = request_irq(pg->irq, pmic_irq_handler, 0, "pmic", pg);
-       if (retval) {
-               pr_warn("Interrupt request failed\n");
-               goto fail_request_irq;
-       }
-
-       for (i = 0; i < 8; i++) {
-               irq_set_chip_and_handler_name(i + pg->irq_base,
-                                             &pmic_irqchip,
-                                             handle_simple_irq,
-                                             "demux");
-               irq_set_chip_data(i + pg->irq_base, pg);
-       }
-       return 0;
-
-fail_request_irq:
-       gpiochip_remove(&pg->chip);
-err:
-       iounmap(pg->gpiointr);
-err2:
-       kfree(pg);
-       return retval;
-}
-
-/* at the same time, register a platform driver
- * this supports the sfi 0.81 fw */
-static struct platform_driver platform_pmic_gpio_driver = {
-       .driver = {
-               .name           = DRIVER_NAME,
-       },
-       .probe          = platform_pmic_gpio_probe,
-};
-
-static int __init platform_pmic_gpio_init(void)
-{
-       return platform_driver_register(&platform_pmic_gpio_driver);
-}
-subsys_initcall(platform_pmic_gpio_init);
index 97b4c3a219c0c79f3a3ed9359bc30bb13a439ce6..25f15df5c2d7b3c37b82e099f301831c403caa7d 100644 (file)
@@ -326,7 +326,7 @@ static int __init mlxplat_init(void)
        return 0;
 
 fail_platform_mux_register:
-       for (i--; i > 0 ; i--)
+       while (--i >= 0)
                platform_device_unregister(priv->pdev_mux[i]);
        platform_device_unregister(priv->pdev_i2c);
 fail_alloc:
index cbf4d83a727106ee0f7e42ca1b868616d994c0c3..25b176996cb793a789214a1a1237910b01cd3673 100644 (file)
@@ -139,7 +139,7 @@ static acpi_status s3_wmi_attach_spi_device(acpi_handle handle,
 
 static int s3_wmi_check_platform_device(struct device *dev, void *data)
 {
-       struct acpi_device *adev, *ts_adev;
+       struct acpi_device *adev, *ts_adev = NULL;
        acpi_handle handle;
        acpi_status status;
 
@@ -244,13 +244,11 @@ static int s3_wmi_remove(struct platform_device *device)
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int s3_wmi_resume(struct device *dev)
+static int __maybe_unused s3_wmi_resume(struct device *dev)
 {
        s3_wmi_send_lid_state();
        return 0;
 }
-#endif
 static SIMPLE_DEV_PM_OPS(s3_wmi_pm, NULL, s3_wmi_resume);
 
 static struct platform_driver s3_wmi_driver = {
index abeb77217a21994a8bdf8481ef0e9dce993af38d..b8cacccf18c8b8ecee42963c8bbbcd2a5e7a309a 100644 (file)
@@ -32,7 +32,7 @@ config POWER_RESET_AT91_RESET
 
 config POWER_RESET_AT91_SAMA5D2_SHDWC
        tristate "Atmel AT91 SAMA5D2-Compatible shutdown controller driver"
-       depends on ARCH_AT91 || COMPILE_TEST
+       depends on ARCH_AT91
        default SOC_SAMA5
        help
          This driver supports the alternate shutdown controller for some Atmel
index a85dd4d233af39713a4a25aac67fd4c7747f8d06..c6c3beea72f994e0c8fd889f04474627097ad93d 100644 (file)
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/platform_device.h>
 #include <linux/printk.h>
 
+#include <soc/at91/at91sam9_ddrsdr.h>
+
 #define AT91_SHDW_CR   0x00            /* Shut Down Control Register */
 #define AT91_SHDW_SHDW         BIT(0)                  /* Shut Down command */
 #define AT91_SHDW_KEY          (0xa5 << 24)            /* KEY Password */
@@ -50,6 +53,7 @@ static const char *shdwc_wakeup_modes[] = {
 
 static void __iomem *at91_shdwc_base;
 static struct clk *sclk;
+static void __iomem *mpddrc_base;
 
 static void __init at91_wakeup_status(void)
 {
@@ -73,6 +77,29 @@ static void at91_poweroff(void)
        writel(AT91_SHDW_KEY | AT91_SHDW_SHDW, at91_shdwc_base + AT91_SHDW_CR);
 }
 
+static void at91_lpddr_poweroff(void)
+{
+       asm volatile(
+               /* Align to cache lines */
+               ".balign 32\n\t"
+
+               /* Ensure AT91_SHDW_CR is in the TLB by reading it */
+               "       ldr     r6, [%2, #" __stringify(AT91_SHDW_CR) "]\n\t"
+
+               /* Power down SDRAM0 */
+               "       str     %1, [%0, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t"
+               /* Shutdown CPU */
+               "       str     %3, [%2, #" __stringify(AT91_SHDW_CR) "]\n\t"
+
+               "       b       .\n\t"
+               :
+               : "r" (mpddrc_base),
+                 "r" cpu_to_le32(AT91_DDRSDRC_LPDDR2_PWOFF),
+                 "r" (at91_shdwc_base),
+                 "r" cpu_to_le32(AT91_SHDW_KEY | AT91_SHDW_SHDW)
+               : "r0");
+}
+
 static int at91_poweroff_get_wakeup_mode(struct device_node *np)
 {
        const char *pm;
@@ -124,6 +151,8 @@ static void at91_poweroff_dt_set_wakeup_mode(struct platform_device *pdev)
 static int __init at91_poweroff_probe(struct platform_device *pdev)
 {
        struct resource *res;
+       struct device_node *np;
+       u32 ddr_type;
        int ret;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -150,12 +179,30 @@ static int __init at91_poweroff_probe(struct platform_device *pdev)
 
        pm_power_off = at91_poweroff;
 
+       np = of_find_compatible_node(NULL, NULL, "atmel,sama5d3-ddramc");
+       if (!np)
+               return 0;
+
+       mpddrc_base = of_iomap(np, 0);
+       of_node_put(np);
+
+       if (!mpddrc_base)
+               return 0;
+
+       ddr_type = readl(mpddrc_base + AT91_DDRSDRC_MDR) & AT91_DDRSDRC_MD;
+       if ((ddr_type == AT91_DDRSDRC_MD_LPDDR2) ||
+           (ddr_type == AT91_DDRSDRC_MD_LPDDR3))
+               pm_power_off = at91_lpddr_poweroff;
+       else
+               iounmap(mpddrc_base);
+
        return 0;
 }
 
 static int __exit at91_poweroff_remove(struct platform_device *pdev)
 {
-       if (pm_power_off == at91_poweroff)
+       if (pm_power_off == at91_poweroff ||
+           pm_power_off == at91_lpddr_poweroff)
                pm_power_off = NULL;
 
        clk_disable_unprepare(sclk);
@@ -163,6 +210,11 @@ static int __exit at91_poweroff_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct of_device_id at91_ramc_of_match[] = {
+       { .compatible = "atmel,sama5d3-ddramc", },
+       { /* sentinel */ }
+};
+
 static const struct of_device_id at91_poweroff_of_match[] = {
        { .compatible = "atmel,at91sam9260-shdwc", },
        { .compatible = "atmel,at91sam9rl-shdwc", },
index 568580cf06552dc1e52ae39059687f25076db7bf..b99769f8ab15678256a81eecb5eaad6f17ad2083 100644 (file)
@@ -134,6 +134,15 @@ static int sama5d3_restart(struct notifier_block *this, unsigned long mode,
        return NOTIFY_DONE;
 }
 
+static int samx7_restart(struct notifier_block *this, unsigned long mode,
+                        void *cmd)
+{
+       writel(cpu_to_le32(AT91_RSTC_KEY | AT91_RSTC_PROCRST),
+              at91_rstc_base);
+
+       return NOTIFY_DONE;
+}
+
 static void __init at91_reset_status(struct platform_device *pdev)
 {
        u32 reg = readl(at91_rstc_base + AT91_RSTC_SR);
@@ -173,6 +182,7 @@ static const struct of_device_id at91_reset_of_match[] = {
        { .compatible = "atmel,at91sam9260-rstc", .data = at91sam9260_restart },
        { .compatible = "atmel,at91sam9g45-rstc", .data = at91sam9g45_restart },
        { .compatible = "atmel,sama5d3-rstc", .data = sama5d3_restart },
+       { .compatible = "atmel,samx7-rstc", .data = samx7_restart },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, at91_reset_of_match);
@@ -238,20 +248,12 @@ static int __exit at91_reset_remove(struct platform_device *pdev)
        return 0;
 }
 
-static const struct platform_device_id at91_reset_plat_match[] = {
-       { "at91-sam9260-reset", (unsigned long)at91sam9260_restart },
-       { "at91-sam9g45-reset", (unsigned long)at91sam9g45_restart },
-       { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(platform, at91_reset_plat_match);
-
 static struct platform_driver at91_reset_driver = {
        .remove = __exit_p(at91_reset_remove),
        .driver = {
                .name = "at91-reset",
                .of_match_table = at91_reset_of_match,
        },
-       .id_table = at91_reset_plat_match,
 };
 module_platform_driver_probe(at91_reset_driver, at91_reset_probe);
 
index 8a5ac9706c9cb31a3584fe8b85e24241605873e7..90b0b5a70ce52a303017319e31afb4bd390d230f 100644 (file)
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/platform_device.h>
 #include <linux/printk.h>
 
+#include <soc/at91/at91sam9_ddrsdr.h>
+
 #define SLOW_CLOCK_FREQ        32768
 
 #define AT91_SHDW_CR   0x00            /* Shut Down Control Register */
@@ -75,6 +78,7 @@ struct shdwc {
  */
 static struct shdwc *at91_shdwc;
 static struct clk *sclk;
+static void __iomem *mpddrc_base;
 
 static const unsigned long long sdwc_dbc_period[] = {
        0, 3, 32, 512, 4096, 32768,
@@ -108,6 +112,29 @@ static void at91_poweroff(void)
               at91_shdwc->at91_shdwc_base + AT91_SHDW_CR);
 }
 
+static void at91_lpddr_poweroff(void)
+{
+       asm volatile(
+               /* Align to cache lines */
+               ".balign 32\n\t"
+
+               /* Ensure AT91_SHDW_CR is in the TLB by reading it */
+               "       ldr     r6, [%2, #" __stringify(AT91_SHDW_CR) "]\n\t"
+
+               /* Power down SDRAM0 */
+               "       str     %1, [%0, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t"
+               /* Shutdown CPU */
+               "       str     %3, [%2, #" __stringify(AT91_SHDW_CR) "]\n\t"
+
+               "       b       .\n\t"
+               :
+               : "r" (mpddrc_base),
+                 "r" cpu_to_le32(AT91_DDRSDRC_LPDDR2_PWOFF),
+                 "r" (at91_shdwc->at91_shdwc_base),
+                 "r" cpu_to_le32(AT91_SHDW_KEY | AT91_SHDW_SHDW)
+               : "r0");
+}
+
 static u32 at91_shdwc_debouncer_value(struct platform_device *pdev,
                                      u32 in_period_us)
 {
@@ -212,6 +239,8 @@ static int __init at91_shdwc_probe(struct platform_device *pdev)
 {
        struct resource *res;
        const struct of_device_id *match;
+       struct device_node *np;
+       u32 ddr_type;
        int ret;
 
        if (!pdev->dev.of_node)
@@ -249,6 +278,23 @@ static int __init at91_shdwc_probe(struct platform_device *pdev)
 
        pm_power_off = at91_poweroff;
 
+       np = of_find_compatible_node(NULL, NULL, "atmel,sama5d3-ddramc");
+       if (!np)
+               return 0;
+
+       mpddrc_base = of_iomap(np, 0);
+       of_node_put(np);
+
+       if (!mpddrc_base)
+               return 0;
+
+       ddr_type = readl(mpddrc_base + AT91_DDRSDRC_MDR) & AT91_DDRSDRC_MD;
+       if ((ddr_type == AT91_DDRSDRC_MD_LPDDR2) ||
+           (ddr_type == AT91_DDRSDRC_MD_LPDDR3))
+               pm_power_off = at91_lpddr_poweroff;
+       else
+               iounmap(mpddrc_base);
+
        return 0;
 }
 
@@ -256,7 +302,8 @@ static int __exit at91_shdwc_remove(struct platform_device *pdev)
 {
        struct shdwc *shdw = platform_get_drvdata(pdev);
 
-       if (pm_power_off == at91_poweroff)
+       if (pm_power_off == at91_poweroff ||
+           pm_power_off == at91_lpddr_poweroff)
                pm_power_off = NULL;
 
        /* Reset values to disable wake-up features  */
index 76806a0be820ecd61bdc38f8577433f913981ce1..da54ac88f068977965bec59fce129ecd0de7fc1b 100644 (file)
@@ -164,6 +164,12 @@ config BATTERY_SBS
          Say Y to include support for SBS battery driver for SBS-compliant
          gas gauges.
 
+config CHARGER_SBS
+        tristate "SBS Compliant charger"
+        depends on I2C
+        help
+         Say Y to include support for SBS compilant battery chargers.
+
 config BATTERY_BQ27XXX
        tristate "BQ27xxx battery driver"
        help
@@ -214,6 +220,18 @@ config BATTERY_DA9150
          This driver can also be built as a module. If so, the module will be
          called da9150-fg.
 
+config CHARGER_AXP20X
+       tristate "X-Powers AXP20X and AXP22X AC power supply driver"
+       depends on MFD_AXP20X
+       depends on AXP20X_ADC
+       depends on IIO
+       help
+         Say Y here to enable support for X-Powers AXP20X and AXP22X PMICs' AC
+         power supply.
+
+         This driver can also be built as a module. If so, the module will be
+         called axp20x_ac_power.
+
 config AXP288_CHARGER
        tristate "X-Powers AXP288 Charger"
        depends on MFD_AXP20X && EXTCON_AXP288
@@ -292,13 +310,6 @@ config BATTERY_JZ4740
          This driver can be build as a module. If so, the module will be
          called jz4740-battery.
 
-config BATTERY_INTEL_MID
-       tristate "Battery driver for Intel MID platforms"
-       depends on INTEL_SCU_IPC && SPI
-       help
-         Say Y here to enable the battery driver on Intel MID
-         platforms.
-
 config BATTERY_RX51
        tristate "Nokia RX-51 (N900) battery driver"
        depends on TWL4030_MADC
@@ -370,6 +381,16 @@ config CHARGER_MAX14577
          Say Y to enable support for the battery charger control sysfs and
          platform data of MAX14577/77836 MUICs.
 
+config CHARGER_DETECTOR_MAX14656
+       tristate "Maxim MAX14656 USB charger detector"
+       depends on I2C
+       depends on OF
+       help
+         Say Y to enable support for the Maxim MAX14656 USB charger detector.
+         The device is compliant with the USB Battery Charging Specification
+         Revision 1.2 and can be found e.g. in Kindle 4/5th generation
+         readers and certain LG devices.
+
 config CHARGER_MAX77693
        tristate "Maxim MAX77693 battery charger driver"
        depends on MFD_MAX77693
@@ -395,6 +416,7 @@ config CHARGER_QCOM_SMBB
        depends on MFD_SPMI_PMIC || COMPILE_TEST
        depends on OF
        depends on EXTCON
+       depends on REGULATOR
        help
          Say Y to include support for the Switch-Mode Battery Charger and
          Boost (SMBB) hardware found in Qualcomm PM8941 PMICs.  The charger
index 36c599d9a495ccf6f329cd806efc2554c15154af..3789a2c06fdf92dfc7d0b74bb65fc8221fe1ca71 100644 (file)
@@ -18,6 +18,7 @@ obj-$(CONFIG_TEST_POWER)      += test_power.o
 
 obj-$(CONFIG_BATTERY_88PM860X) += 88pm860x_battery.o
 obj-$(CONFIG_BATTERY_ACT8945A) += act8945a_charger.o
+obj-$(CONFIG_CHARGER_AXP20X)   += axp20x_ac_power.o
 obj-$(CONFIG_BATTERY_DS2760)   += ds2760_battery.o
 obj-$(CONFIG_BATTERY_DS2780)   += ds2780_battery.o
 obj-$(CONFIG_BATTERY_DS2781)   += ds2781_battery.o
@@ -31,6 +32,7 @@ obj-$(CONFIG_BATTERY_COLLIE)  += collie_battery.o
 obj-$(CONFIG_BATTERY_IPAQ_MICRO) += ipaq_micro_battery.o
 obj-$(CONFIG_BATTERY_WM97XX)   += wm97xx_battery.o
 obj-$(CONFIG_BATTERY_SBS)      += sbs-battery.o
+obj-$(CONFIG_CHARGER_SBS)      += sbs-charger.o
 obj-$(CONFIG_BATTERY_BQ27XXX)  += bq27xxx_battery.o
 obj-$(CONFIG_BATTERY_BQ27XXX_I2C) += bq27xxx_battery_i2c.o
 obj-$(CONFIG_BATTERY_DA9030)   += da9030_battery.o
@@ -47,7 +49,6 @@ obj-$(CONFIG_BATTERY_TWL4030_MADC)    += twl4030_madc_battery.o
 obj-$(CONFIG_CHARGER_88PM860X) += 88pm860x_charger.o
 obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
 obj-$(CONFIG_BATTERY_JZ4740)   += jz4740-battery.o
-obj-$(CONFIG_BATTERY_INTEL_MID)        += intel_mid_battery.o
 obj-$(CONFIG_BATTERY_RX51)     += rx51_battery.o
 obj-$(CONFIG_AB8500_BM)                += ab8500_bmdata.o ab8500_charger.o ab8500_fg.o ab8500_btemp.o abx500_chargalg.o pm2301_charger.o
 obj-$(CONFIG_CHARGER_ISP1704)  += isp1704_charger.o
@@ -58,6 +59,7 @@ obj-$(CONFIG_CHARGER_LP8788)  += lp8788-charger.o
 obj-$(CONFIG_CHARGER_GPIO)     += gpio-charger.o
 obj-$(CONFIG_CHARGER_MANAGER)  += charger-manager.o
 obj-$(CONFIG_CHARGER_MAX14577) += max14577_charger.o
+obj-$(CONFIG_CHARGER_DETECTOR_MAX14656)        += max14656_charger_detector.o
 obj-$(CONFIG_CHARGER_MAX77693) += max77693_charger.o
 obj-$(CONFIG_CHARGER_MAX8997)  += max8997_charger.o
 obj-$(CONFIG_CHARGER_MAX8998)  += max8998_charger.o
index 6ffdc18f2599b04a96d0432d5f18dc6a8d973fbe..f7a35ebfbab29cf329312bd4b7315bdd0b2b7ec6 100644 (file)
@@ -76,8 +76,8 @@ struct ab8500_btemp_ranges {
  * @dev:               Pointer to the structure device
  * @node:              List of AB8500 BTEMPs, hence prepared for reentrance
  * @curr_source:       What current source we use, in uA
- * @bat_temp:          Dispatched battery temperature in degree Celcius
- * @prev_bat_temp      Last measured battery temperature in degree Celcius
+ * @bat_temp:          Dispatched battery temperature in degree Celsius
+ * @prev_bat_temp      Last measured battery temperature in degree Celsius
  * @parent:            Pointer to the struct ab8500
  * @gpadc:             Pointer to the struct gpadc
  * @fg:                        Pointer to the struct fg
@@ -123,10 +123,7 @@ static LIST_HEAD(ab8500_btemp_list);
  */
 struct ab8500_btemp *ab8500_btemp_get(void)
 {
-       struct ab8500_btemp *btemp;
-       btemp = list_first_entry(&ab8500_btemp_list, struct ab8500_btemp, node);
-
-       return btemp;
+       return list_first_entry(&ab8500_btemp_list, struct ab8500_btemp, node);
 }
 EXPORT_SYMBOL(ab8500_btemp_get);
 
@@ -464,13 +461,13 @@ static int ab8500_btemp_get_batctrl_res(struct ab8500_btemp *di)
  * @tbl_size:  size of the resistance to temperature table
  * @res:       resistance to calculate the temperature from
  *
- * This function returns the battery temperature in degrees Celcius
+ * This function returns the battery temperature in degrees Celsius
  * based on the NTC resistance.
  */
 static int ab8500_btemp_res_to_temp(struct ab8500_btemp *di,
        const struct abx500_res_to_temp *tbl, int tbl_size, int res)
 {
-       int i, temp;
+       int i;
        /*
         * Calculate the formula for the straight line
         * Simple interpolation if we are within
@@ -488,9 +485,8 @@ static int ab8500_btemp_res_to_temp(struct ab8500_btemp *di,
                        i++;
        }
 
-       temp = tbl[i].temp + ((tbl[i + 1].temp - tbl[i].temp) *
+       return tbl[i].temp + ((tbl[i + 1].temp - tbl[i].temp) *
                (res - tbl[i].resist)) / (tbl[i + 1].resist - tbl[i].resist);
-       return temp;
 }
 
 /**
diff --git a/drivers/power/supply/axp20x_ac_power.c b/drivers/power/supply/axp20x_ac_power.c
new file mode 100644 (file)
index 0000000..38f4e87
--- /dev/null
@@ -0,0 +1,253 @@
+/*
+ * AXP20X and AXP22X PMICs' ACIN power supply driver
+ *
+ * Copyright (C) 2016 Free Electrons
+ *     Quentin Schulz <quentin.schulz@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/axp20x.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/iio/consumer.h>
+
+#define AXP20X_PWR_STATUS_ACIN_PRESENT BIT(7)
+#define AXP20X_PWR_STATUS_ACIN_AVAIL   BIT(6)
+
+#define DRVNAME "axp20x-ac-power-supply"
+
+struct axp20x_ac_power {
+       struct regmap *regmap;
+       struct power_supply *supply;
+       struct iio_channel *acin_v;
+       struct iio_channel *acin_i;
+};
+
+static irqreturn_t axp20x_ac_power_irq(int irq, void *devid)
+{
+       struct axp20x_ac_power *power = devid;
+
+       power_supply_changed(power->supply);
+
+       return IRQ_HANDLED;
+}
+
+static int axp20x_ac_power_get_property(struct power_supply *psy,
+                                       enum power_supply_property psp,
+                                       union power_supply_propval *val)
+{
+       struct axp20x_ac_power *power = power_supply_get_drvdata(psy);
+       int ret, reg;
+
+       switch (psp) {
+       case POWER_SUPPLY_PROP_HEALTH:
+               ret = regmap_read(power->regmap, AXP20X_PWR_INPUT_STATUS, &reg);
+               if (ret)
+                       return ret;
+
+               if (reg & AXP20X_PWR_STATUS_ACIN_PRESENT) {
+                       val->intval = POWER_SUPPLY_HEALTH_GOOD;
+                       return 0;
+               }
+
+               val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
+               return 0;
+
+       case POWER_SUPPLY_PROP_PRESENT:
+               ret = regmap_read(power->regmap, AXP20X_PWR_INPUT_STATUS, &reg);
+               if (ret)
+                       return ret;
+
+               val->intval = !!(reg & AXP20X_PWR_STATUS_ACIN_PRESENT);
+               return 0;
+
+       case POWER_SUPPLY_PROP_ONLINE:
+               ret = regmap_read(power->regmap, AXP20X_PWR_INPUT_STATUS, &reg);
+               if (ret)
+                       return ret;
+
+               val->intval = !!(reg & AXP20X_PWR_STATUS_ACIN_AVAIL);
+               return 0;
+
+       case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+               ret = iio_read_channel_processed(power->acin_v, &val->intval);
+               if (ret)
+                       return ret;
+
+               /* IIO framework gives mV but Power Supply framework gives uV */
+               val->intval *= 1000;
+
+               return 0;
+
+       case POWER_SUPPLY_PROP_CURRENT_NOW:
+               ret = iio_read_channel_processed(power->acin_i, &val->intval);
+               if (ret)
+                       return ret;
+
+               /* IIO framework gives mA but Power Supply framework gives uA */
+               val->intval *= 1000;
+
+               return 0;
+
+       default:
+               return -EINVAL;
+       }
+
+       return -EINVAL;
+}
+
+static enum power_supply_property axp20x_ac_power_properties[] = {
+       POWER_SUPPLY_PROP_HEALTH,
+       POWER_SUPPLY_PROP_PRESENT,
+       POWER_SUPPLY_PROP_ONLINE,
+       POWER_SUPPLY_PROP_VOLTAGE_NOW,
+       POWER_SUPPLY_PROP_CURRENT_NOW,
+};
+
+static enum power_supply_property axp22x_ac_power_properties[] = {
+       POWER_SUPPLY_PROP_HEALTH,
+       POWER_SUPPLY_PROP_PRESENT,
+       POWER_SUPPLY_PROP_ONLINE,
+};
+
+static const struct power_supply_desc axp20x_ac_power_desc = {
+       .name = "axp20x-ac",
+       .type = POWER_SUPPLY_TYPE_MAINS,
+       .properties = axp20x_ac_power_properties,
+       .num_properties = ARRAY_SIZE(axp20x_ac_power_properties),
+       .get_property = axp20x_ac_power_get_property,
+};
+
+static const struct power_supply_desc axp22x_ac_power_desc = {
+       .name = "axp22x-ac",
+       .type = POWER_SUPPLY_TYPE_MAINS,
+       .properties = axp22x_ac_power_properties,
+       .num_properties = ARRAY_SIZE(axp22x_ac_power_properties),
+       .get_property = axp20x_ac_power_get_property,
+};
+
+struct axp_data {
+       const struct power_supply_desc  *power_desc;
+       bool                            acin_adc;
+};
+
+static const struct axp_data axp20x_data = {
+       .power_desc = &axp20x_ac_power_desc,
+       .acin_adc = true,
+};
+
+static const struct axp_data axp22x_data = {
+       .power_desc = &axp22x_ac_power_desc,
+       .acin_adc = false,
+};
+
+static int axp20x_ac_power_probe(struct platform_device *pdev)
+{
+       struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
+       struct power_supply_config psy_cfg = {};
+       struct axp20x_ac_power *power;
+       struct axp_data *axp_data;
+       static const char * const irq_names[] = { "ACIN_PLUGIN", "ACIN_REMOVAL",
+               NULL };
+       int i, irq, ret;
+
+       if (!of_device_is_available(pdev->dev.of_node))
+               return -ENODEV;
+
+       if (!axp20x) {
+               dev_err(&pdev->dev, "Parent drvdata not set\n");
+               return -EINVAL;
+       }
+
+       power = devm_kzalloc(&pdev->dev, sizeof(*power), GFP_KERNEL);
+       if (!power)
+               return -ENOMEM;
+
+       axp_data = (struct axp_data *)of_device_get_match_data(&pdev->dev);
+
+       if (axp_data->acin_adc) {
+               power->acin_v = devm_iio_channel_get(&pdev->dev, "acin_v");
+               if (IS_ERR(power->acin_v)) {
+                       if (PTR_ERR(power->acin_v) == -ENODEV)
+                               return -EPROBE_DEFER;
+                       return PTR_ERR(power->acin_v);
+               }
+
+               power->acin_i = devm_iio_channel_get(&pdev->dev, "acin_i");
+               if (IS_ERR(power->acin_i)) {
+                       if (PTR_ERR(power->acin_i) == -ENODEV)
+                               return -EPROBE_DEFER;
+                       return PTR_ERR(power->acin_i);
+               }
+       }
+
+       power->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+
+       platform_set_drvdata(pdev, power);
+
+       psy_cfg.of_node = pdev->dev.of_node;
+       psy_cfg.drv_data = power;
+
+       power->supply = devm_power_supply_register(&pdev->dev,
+                                                  axp_data->power_desc,
+                                                  &psy_cfg);
+       if (IS_ERR(power->supply))
+               return PTR_ERR(power->supply);
+
+       /* Request irqs after registering, as irqs may trigger immediately */
+       for (i = 0; irq_names[i]; i++) {
+               irq = platform_get_irq_byname(pdev, irq_names[i]);
+               if (irq < 0) {
+                       dev_warn(&pdev->dev, "No IRQ for %s: %d\n",
+                                irq_names[i], irq);
+                       continue;
+               }
+               irq = regmap_irq_get_virq(axp20x->regmap_irqc, irq);
+               ret = devm_request_any_context_irq(&pdev->dev, irq,
+                                                  axp20x_ac_power_irq, 0,
+                                                  DRVNAME, power);
+               if (ret < 0)
+                       dev_warn(&pdev->dev, "Error requesting %s IRQ: %d\n",
+                                irq_names[i], ret);
+       }
+
+       return 0;
+}
+
+static const struct of_device_id axp20x_ac_power_match[] = {
+       {
+               .compatible = "x-powers,axp202-ac-power-supply",
+               .data = (void *)&axp20x_data,
+       }, {
+               .compatible = "x-powers,axp221-ac-power-supply",
+               .data = (void *)&axp22x_data,
+       }, { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, axp20x_ac_power_match);
+
+static struct platform_driver axp20x_ac_power_driver = {
+       .probe = axp20x_ac_power_probe,
+       .driver = {
+               .name = DRVNAME,
+               .of_match_table = axp20x_ac_power_match,
+       },
+};
+
+module_platform_driver(axp20x_ac_power_driver);
+
+MODULE_AUTHOR("Quentin Schulz <quentin.schulz@free-electrons.com>");
+MODULE_DESCRIPTION("AXP20X and AXP22X PMICs' AC power supply driver");
+MODULE_LICENSE("GPL");
index 6af6feb7058da73f02ac7dadcca56a776925073b..2397c482656ebb2be64afd550a666ea840fa5fb5 100644 (file)
 #include <linux/mfd/axp20x.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/power_supply.h>
 #include <linux/regmap.h>
 #include <linux/slab.h>
+#include <linux/iio/consumer.h>
 
 #define DRVNAME "axp20x-usb-power-supply"
 
@@ -30,6 +32,8 @@
 #define AXP20X_USB_STATUS_VBUS_VALID   BIT(2)
 
 #define AXP20X_VBUS_VHOLD_uV(b)                (4000000 + (((b) >> 3) & 7) * 100000)
+#define AXP20X_VBUS_VHOLD_MASK         GENMASK(5, 3)
+#define AXP20X_VBUS_VHOLD_OFFSET       3
 #define AXP20X_VBUS_CLIMIT_MASK                3
 #define AXP20X_VBUC_CLIMIT_900mA       0
 #define AXP20X_VBUC_CLIMIT_500mA       1
@@ -45,6 +49,9 @@ struct axp20x_usb_power {
        struct device_node *np;
        struct regmap *regmap;
        struct power_supply *supply;
+       enum axp20x_variants axp20x_id;
+       struct iio_channel *vbus_v;
+       struct iio_channel *vbus_i;
 };
 
 static irqreturn_t axp20x_usb_power_irq(int irq, void *devid)
@@ -72,6 +79,20 @@ static int axp20x_usb_power_get_property(struct power_supply *psy,
                val->intval = AXP20X_VBUS_VHOLD_uV(v);
                return 0;
        case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+               if (IS_ENABLED(CONFIG_AXP20X_ADC)) {
+                       ret = iio_read_channel_processed(power->vbus_v,
+                                                        &val->intval);
+                       if (ret)
+                               return ret;
+
+                       /*
+                        * IIO framework gives mV but Power Supply framework
+                        * gives uV.
+                        */
+                       val->intval *= 1000;
+                       return 0;
+               }
+
                ret = axp20x_read_variable_width(power->regmap,
                                                 AXP20X_VBUS_V_ADC_H, 12);
                if (ret < 0)
@@ -86,12 +107,10 @@ static int axp20x_usb_power_get_property(struct power_supply *psy,
 
                switch (v & AXP20X_VBUS_CLIMIT_MASK) {
                case AXP20X_VBUC_CLIMIT_100mA:
-                       if (of_device_is_compatible(power->np,
-                                       "x-powers,axp202-usb-power-supply")) {
-                               val->intval = 100000;
-                       } else {
+                       if (power->axp20x_id == AXP221_ID)
                                val->intval = -1; /* No 100mA limit */
-                       }
+                       else
+                               val->intval = 100000;
                        break;
                case AXP20X_VBUC_CLIMIT_500mA:
                        val->intval = 500000;
@@ -105,6 +124,20 @@ static int axp20x_usb_power_get_property(struct power_supply *psy,
                }
                return 0;
        case POWER_SUPPLY_PROP_CURRENT_NOW:
+               if (IS_ENABLED(CONFIG_AXP20X_ADC)) {
+                       ret = iio_read_channel_processed(power->vbus_i,
+                                                        &val->intval);
+                       if (ret)
+                               return ret;
+
+                       /*
+                        * IIO framework gives mA but Power Supply framework
+                        * gives uA.
+                        */
+                       val->intval *= 1000;
+                       return 0;
+               }
+
                ret = axp20x_read_variable_width(power->regmap,
                                                 AXP20X_VBUS_I_ADC_H, 12);
                if (ret < 0)
@@ -130,8 +163,7 @@ static int axp20x_usb_power_get_property(struct power_supply *psy,
 
                val->intval = POWER_SUPPLY_HEALTH_GOOD;
 
-               if (of_device_is_compatible(power->np,
-                               "x-powers,axp202-usb-power-supply")) {
+               if (power->axp20x_id == AXP202_ID) {
                        ret = regmap_read(power->regmap,
                                          AXP20X_USB_OTG_STATUS, &v);
                        if (ret)
@@ -155,6 +187,81 @@ static int axp20x_usb_power_get_property(struct power_supply *psy,
        return 0;
 }
 
+static int axp20x_usb_power_set_voltage_min(struct axp20x_usb_power *power,
+                                           int intval)
+{
+       int val;
+
+       switch (intval) {
+       case 4000000:
+       case 4100000:
+       case 4200000:
+       case 4300000:
+       case 4400000:
+       case 4500000:
+       case 4600000:
+       case 4700000:
+               val = (intval - 4000000) / 100000;
+               return regmap_update_bits(power->regmap,
+                                         AXP20X_VBUS_IPSOUT_MGMT,
+                                         AXP20X_VBUS_VHOLD_MASK,
+                                         val << AXP20X_VBUS_VHOLD_OFFSET);
+       default:
+               return -EINVAL;
+       }
+
+       return -EINVAL;
+}
+
+static int axp20x_usb_power_set_current_max(struct axp20x_usb_power *power,
+                                           int intval)
+{
+       int val;
+
+       switch (intval) {
+       case 100000:
+               if (power->axp20x_id == AXP221_ID)
+                       return -EINVAL;
+       case 500000:
+       case 900000:
+               val = (900000 - intval) / 400000;
+               return regmap_update_bits(power->regmap,
+                                         AXP20X_VBUS_IPSOUT_MGMT,
+                                         AXP20X_VBUS_CLIMIT_MASK, val);
+       default:
+               return -EINVAL;
+       }
+
+       return -EINVAL;
+}
+
+static int axp20x_usb_power_set_property(struct power_supply *psy,
+                                        enum power_supply_property psp,
+                                        const union power_supply_propval *val)
+{
+       struct axp20x_usb_power *power = power_supply_get_drvdata(psy);
+
+       switch (psp) {
+       case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+               return axp20x_usb_power_set_voltage_min(power, val->intval);
+
+       case POWER_SUPPLY_PROP_CURRENT_MAX:
+               return axp20x_usb_power_set_current_max(power, val->intval);
+
+       default:
+               return -EINVAL;
+       }
+
+       return -EINVAL;
+}
+
+static int axp20x_usb_power_prop_writeable(struct power_supply *psy,
+                                          enum power_supply_property psp)
+{
+       return psp == POWER_SUPPLY_PROP_VOLTAGE_MIN ||
+              psp == POWER_SUPPLY_PROP_CURRENT_MAX;
+}
+
 static enum power_supply_property axp20x_usb_power_properties[] = {
        POWER_SUPPLY_PROP_HEALTH,
        POWER_SUPPLY_PROP_PRESENT,
@@ -178,7 +285,9 @@ static const struct power_supply_desc axp20x_usb_power_desc = {
        .type = POWER_SUPPLY_TYPE_USB,
        .properties = axp20x_usb_power_properties,
        .num_properties = ARRAY_SIZE(axp20x_usb_power_properties),
+       .property_is_writeable = axp20x_usb_power_prop_writeable,
        .get_property = axp20x_usb_power_get_property,
+       .set_property = axp20x_usb_power_set_property,
 };
 
 static const struct power_supply_desc axp22x_usb_power_desc = {
@@ -186,9 +295,41 @@ static const struct power_supply_desc axp22x_usb_power_desc = {
        .type = POWER_SUPPLY_TYPE_USB,
        .properties = axp22x_usb_power_properties,
        .num_properties = ARRAY_SIZE(axp22x_usb_power_properties),
+       .property_is_writeable = axp20x_usb_power_prop_writeable,
        .get_property = axp20x_usb_power_get_property,
+       .set_property = axp20x_usb_power_set_property,
 };
 
+static int configure_iio_channels(struct platform_device *pdev,
+                                 struct axp20x_usb_power *power)
+{
+       power->vbus_v = devm_iio_channel_get(&pdev->dev, "vbus_v");
+       if (IS_ERR(power->vbus_v)) {
+               if (PTR_ERR(power->vbus_v) == -ENODEV)
+                       return -EPROBE_DEFER;
+               return PTR_ERR(power->vbus_v);
+       }
+
+       power->vbus_i = devm_iio_channel_get(&pdev->dev, "vbus_i");
+       if (IS_ERR(power->vbus_i)) {
+               if (PTR_ERR(power->vbus_i) == -ENODEV)
+                       return -EPROBE_DEFER;
+               return PTR_ERR(power->vbus_i);
+       }
+
+       return 0;
+}
+
+static int configure_adc_registers(struct axp20x_usb_power *power)
+{
+       /* Enable vbus voltage and current measurement */
+       return regmap_update_bits(power->regmap, AXP20X_ADC_EN1,
+                                 AXP20X_ADC_EN1_VBUS_CURR |
+                                 AXP20X_ADC_EN1_VBUS_VOLT,
+                                 AXP20X_ADC_EN1_VBUS_CURR |
+                                 AXP20X_ADC_EN1_VBUS_VOLT);
+}
+
 static int axp20x_usb_power_probe(struct platform_device *pdev)
 {
        struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
@@ -214,11 +355,13 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
        if (!power)
                return -ENOMEM;
 
+       power->axp20x_id = (enum axp20x_variants)of_device_get_match_data(
+                                                               &pdev->dev);
+
        power->np = pdev->dev.of_node;
        power->regmap = axp20x->regmap;
 
-       if (of_device_is_compatible(power->np,
-                       "x-powers,axp202-usb-power-supply")) {
+       if (power->axp20x_id == AXP202_ID) {
                /* Enable vbus valid checking */
                ret = regmap_update_bits(power->regmap, AXP20X_VBUS_MON,
                                         AXP20X_VBUS_MON_VBUS_VALID,
@@ -226,17 +369,18 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
                if (ret)
                        return ret;
 
-               /* Enable vbus voltage and current measurement */
-               ret = regmap_update_bits(power->regmap, AXP20X_ADC_EN1,
-                       AXP20X_ADC_EN1_VBUS_CURR | AXP20X_ADC_EN1_VBUS_VOLT,
-                       AXP20X_ADC_EN1_VBUS_CURR | AXP20X_ADC_EN1_VBUS_VOLT);
+               if (IS_ENABLED(CONFIG_AXP20X_ADC))
+                       ret = configure_iio_channels(pdev, power);
+               else
+                       ret = configure_adc_registers(power);
+
                if (ret)
                        return ret;
 
                usb_power_desc = &axp20x_usb_power_desc;
                irq_names = axp20x_irq_names;
-       } else if (of_device_is_compatible(power->np,
-                       "x-powers,axp221-usb-power-supply")) {
+       } else if (power->axp20x_id == AXP221_ID ||
+                  power->axp20x_id == AXP223_ID) {
                usb_power_desc = &axp22x_usb_power_desc;
                irq_names = axp22x_irq_names;
        } else {
@@ -273,9 +417,16 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
 }
 
 static const struct of_device_id axp20x_usb_power_match[] = {
-       { .compatible = "x-powers,axp202-usb-power-supply" },
-       { .compatible = "x-powers,axp221-usb-power-supply" },
-       { }
+       {
+               .compatible = "x-powers,axp202-usb-power-supply",
+               .data = (void *)AXP202_ID,
+       }, {
+               .compatible = "x-powers,axp221-usb-power-supply",
+               .data = (void *)AXP221_ID,
+       }, {
+               .compatible = "x-powers,axp223-usb-power-supply",
+               .data = (void *)AXP223_ID,
+       }, { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, axp20x_usb_power_match);
 
index 75b8e0c7402b15bbe7b3aadadcc1b47aff538dff..6be2fe27bb07f38062225cb45ffc128c2b0511e8 100644 (file)
 #define CHRG_VLTFC_0C                  0xA5    /* 0 DegC */
 #define CHRG_VHTFC_45C                 0x1F    /* 45 DegC */
 
-#define BAT_IRQ_CFG_CHRG_DONE          (1 << 2)
-#define BAT_IRQ_CFG_CHRG_START         (1 << 3)
-#define BAT_IRQ_CFG_BAT_SAFE_EXIT      (1 << 4)
-#define BAT_IRQ_CFG_BAT_SAFE_ENTER     (1 << 5)
-#define BAT_IRQ_CFG_BAT_DISCON         (1 << 6)
-#define BAT_IRQ_CFG_BAT_CONN           (1 << 7)
-#define BAT_IRQ_CFG_BAT_MASK           0xFC
-
-#define TEMP_IRQ_CFG_QCBTU             (1 << 4)
-#define TEMP_IRQ_CFG_CBTU              (1 << 5)
-#define TEMP_IRQ_CFG_QCBTO             (1 << 6)
-#define TEMP_IRQ_CFG_CBTO              (1 << 7)
-#define TEMP_IRQ_CFG_MASK              0xF0
-
 #define FG_CNTL_OCV_ADJ_EN             (1 << 3)
 
 #define CV_4100MV                      4100    /* 4100mV */
 #define ILIM_3000MA                    3000    /* 3000mA */
 
 #define AXP288_EXTCON_DEV_NAME         "axp288_extcon"
+#define USB_HOST_EXTCON_DEV_NAME       "INT3496:00"
+
+static const unsigned int cable_ids[] =
+       { EXTCON_CHG_USB_SDP, EXTCON_CHG_USB_CDP, EXTCON_CHG_USB_DCP };
 
 enum {
        VBUS_OV_IRQ = 0,
@@ -143,7 +133,6 @@ enum {
 
 struct axp288_chrg_info {
        struct platform_device *pdev;
-       struct axp20x_chrg_pdata *pdata;
        struct regmap *regmap;
        struct regmap_irq_chip_data *regmap_irqc;
        int irq[CHRG_INTR_END];
@@ -163,20 +152,16 @@ struct axp288_chrg_info {
                struct extcon_dev *edev;
                bool connected;
                enum power_supply_type chg_type;
-               struct notifier_block nb;
+               struct notifier_block nb[ARRAY_SIZE(cable_ids)];
                struct work_struct work;
        } cable;
 
-       int health;
        int inlmt;
        int cc;
        int cv;
        int max_cc;
        int max_cv;
-       bool online;
-       bool present;
-       bool enable_charger;
-       bool is_charger_enabled;
+       int is_charger_enabled;
 };
 
 static inline int axp288_charger_set_cc(struct axp288_chrg_info *info, int cc)
@@ -305,6 +290,9 @@ static int axp288_charger_enable_charger(struct axp288_chrg_info *info,
 {
        int ret;
 
+       if ((int)enable == info->is_charger_enabled)
+               return 0;
+
        if (enable)
                ret = regmap_update_bits(info->regmap, AXP20X_CHRG_CTRL1,
                                CHRG_CCCV_CHG_EN, CHRG_CCCV_CHG_EN);
@@ -430,8 +418,7 @@ static int axp288_charger_usb_get_property(struct power_supply *psy,
                ret = axp288_charger_is_present(info);
                if (ret < 0)
                        goto psy_get_prop_fail;
-               info->present = ret;
-               val->intval = info->present;
+               val->intval = ret;
                break;
        case POWER_SUPPLY_PROP_ONLINE:
                /* Check for OTG case first */
@@ -442,8 +429,7 @@ static int axp288_charger_usb_get_property(struct power_supply *psy,
                ret = axp288_charger_is_online(info);
                if (ret < 0)
                        goto psy_get_prop_fail;
-               info->online = ret;
-               val->intval = info->online;
+               val->intval = ret;
                break;
        case POWER_SUPPLY_PROP_HEALTH:
                val->intval = axp288_get_charger_health(info);
@@ -576,20 +562,20 @@ static void axp288_charger_extcon_evt_worker(struct work_struct *work)
        struct axp288_chrg_info *info =
            container_of(work, struct axp288_chrg_info, cable.work);
        int ret, current_limit;
-       bool changed = false;
        struct extcon_dev *edev = info->cable.edev;
        bool old_connected = info->cable.connected;
+       enum power_supply_type old_chg_type = info->cable.chg_type;
 
        /* Determine cable/charger type */
-       if (extcon_get_cable_state_(edev, EXTCON_CHG_USB_SDP) > 0) {
+       if (extcon_get_state(edev, EXTCON_CHG_USB_SDP) > 0) {
                dev_dbg(&info->pdev->dev, "USB SDP charger  is connected");
                info->cable.connected = true;
                info->cable.chg_type = POWER_SUPPLY_TYPE_USB;
-       } else if (extcon_get_cable_state_(edev, EXTCON_CHG_USB_CDP) > 0) {
+       } else if (extcon_get_state(edev, EXTCON_CHG_USB_CDP) > 0) {
                dev_dbg(&info->pdev->dev, "USB CDP charger is connected");
                info->cable.connected = true;
                info->cable.chg_type = POWER_SUPPLY_TYPE_USB_CDP;
-       } else if (extcon_get_cable_state_(edev, EXTCON_CHG_USB_DCP) > 0) {
+       } else if (extcon_get_state(edev, EXTCON_CHG_USB_DCP) > 0) {
                dev_dbg(&info->pdev->dev, "USB DCP charger is connected");
                info->cable.connected = true;
                info->cable.chg_type = POWER_SUPPLY_TYPE_USB_DCP;
@@ -601,22 +587,15 @@ static void axp288_charger_extcon_evt_worker(struct work_struct *work)
        }
 
        /* Cable status changed */
-       if (old_connected != info->cable.connected)
-               changed = true;
-
-       if (!changed)
+       if (old_connected == info->cable.connected &&
+           old_chg_type == info->cable.chg_type)
                return;
 
        mutex_lock(&info->lock);
 
-       if (info->is_charger_enabled && !info->cable.connected) {
-               info->enable_charger = false;
-               ret = axp288_charger_enable_charger(info, info->enable_charger);
-               if (ret < 0)
-                       dev_err(&info->pdev->dev,
-                               "cannot disable charger (%d)", ret);
+       if (info->cable.connected) {
+               axp288_charger_enable_charger(info, false);
 
-       } else if (!info->is_charger_enabled && info->cable.connected) {
                switch (info->cable.chg_type) {
                case POWER_SUPPLY_TYPE_USB:
                        current_limit = ILIM_500MA;
@@ -635,36 +614,49 @@ static void axp288_charger_extcon_evt_worker(struct work_struct *work)
 
                /* Set vbus current limit first, then enable charger */
                ret = axp288_charger_set_vbus_inlmt(info, current_limit);
-               if (ret < 0) {
+               if (ret == 0)
+                       axp288_charger_enable_charger(info, true);
+               else
                        dev_err(&info->pdev->dev,
                                "error setting current limit (%d)", ret);
-               } else {
-                       info->enable_charger = (current_limit > 0);
-                       ret = axp288_charger_enable_charger(info,
-                                                       info->enable_charger);
-                       if (ret < 0)
-                               dev_err(&info->pdev->dev,
-                                       "cannot enable charger (%d)", ret);
-               }
+       } else {
+               axp288_charger_enable_charger(info, false);
        }
 
-       if (changed)
-               info->health = axp288_get_charger_health(info);
-
        mutex_unlock(&info->lock);
 
-       if (changed)
-               power_supply_changed(info->psy_usb);
+       power_supply_changed(info->psy_usb);
 }
 
-static int axp288_charger_handle_cable_evt(struct notifier_block *nb,
-                                         unsigned long event, void *param)
+/*
+ * We need 3 copies of this, because there is no way to find out for which
+ * cable id we are being called from the passed in arguments; and we must
+ * have a separate nb for each extcon_register_notifier call.
+ */
+static int axp288_charger_handle_cable0_evt(struct notifier_block *nb,
+                                           unsigned long event, void *param)
 {
        struct axp288_chrg_info *info =
-           container_of(nb, struct axp288_chrg_info, cable.nb);
+               container_of(nb, struct axp288_chrg_info, cable.nb[0]);
+       schedule_work(&info->cable.work);
+       return NOTIFY_OK;
+}
 
+static int axp288_charger_handle_cable1_evt(struct notifier_block *nb,
+                                           unsigned long event, void *param)
+{
+       struct axp288_chrg_info *info =
+               container_of(nb, struct axp288_chrg_info, cable.nb[1]);
        schedule_work(&info->cable.work);
+       return NOTIFY_OK;
+}
 
+static int axp288_charger_handle_cable2_evt(struct notifier_block *nb,
+                                           unsigned long event, void *param)
+{
+       struct axp288_chrg_info *info =
+               container_of(nb, struct axp288_chrg_info, cable.nb[2]);
+       schedule_work(&info->cable.work);
        return NOTIFY_OK;
 }
 
@@ -672,7 +664,17 @@ static void axp288_charger_otg_evt_worker(struct work_struct *work)
 {
        struct axp288_chrg_info *info =
            container_of(work, struct axp288_chrg_info, otg.work);
-       int ret;
+       struct extcon_dev *edev = info->otg.cable;
+       int ret, usb_host = extcon_get_state(edev, EXTCON_USB_HOST);
+
+       dev_dbg(&info->pdev->dev, "external connector USB-Host is %s\n",
+                               usb_host ? "attached" : "detached");
+
+       /*
+        * Set usb_id_short flag to avoid running charger detection logic
+        * in case usb host.
+        */
+       info->otg.id_short = usb_host;
 
        /* Disable VBUS path before enabling the 5V boost */
        ret = axp288_charger_vbus_path_select(info, !info->otg.id_short);
@@ -685,135 +687,109 @@ static int axp288_charger_handle_otg_evt(struct notifier_block *nb,
 {
        struct axp288_chrg_info *info =
            container_of(nb, struct axp288_chrg_info, otg.id_nb);
-       struct extcon_dev *edev = info->otg.cable;
-       int usb_host = extcon_get_cable_state_(edev, EXTCON_USB_HOST);
 
-       dev_dbg(&info->pdev->dev, "external connector USB-Host is %s\n",
-                               usb_host ? "attached" : "detached");
-
-       /*
-        * Set usb_id_short flag to avoid running charger detection logic
-        * in case usb host.
-        */
-       info->otg.id_short = usb_host;
        schedule_work(&info->otg.work);
 
        return NOTIFY_OK;
 }
 
-static void charger_init_hw_regs(struct axp288_chrg_info *info)
+static int charger_init_hw_regs(struct axp288_chrg_info *info)
 {
        int ret, cc, cv;
        unsigned int val;
 
        /* Program temperature thresholds */
        ret = regmap_write(info->regmap, AXP20X_V_LTF_CHRG, CHRG_VLTFC_0C);
-       if (ret < 0)
-               dev_warn(&info->pdev->dev, "register(%x) write error(%d)\n",
+       if (ret < 0) {
+               dev_err(&info->pdev->dev, "register(%x) write error(%d)\n",
                                                        AXP20X_V_LTF_CHRG, ret);
+               return ret;
+       }
 
        ret = regmap_write(info->regmap, AXP20X_V_HTF_CHRG, CHRG_VHTFC_45C);
-       if (ret < 0)
-               dev_warn(&info->pdev->dev, "register(%x) write error(%d)\n",
+       if (ret < 0) {
+               dev_err(&info->pdev->dev, "register(%x) write error(%d)\n",
                                                        AXP20X_V_HTF_CHRG, ret);
+               return ret;
+       }
 
        /* Do not turn-off charger o/p after charge cycle ends */
        ret = regmap_update_bits(info->regmap,
                                AXP20X_CHRG_CTRL2,
-                               CNTL2_CHG_OUT_TURNON, 1);
-       if (ret < 0)
-               dev_warn(&info->pdev->dev, "register(%x) write error(%d)\n",
+                               CNTL2_CHG_OUT_TURNON, CNTL2_CHG_OUT_TURNON);
+       if (ret < 0) {
+               dev_err(&info->pdev->dev, "register(%x) write error(%d)\n",
                                                AXP20X_CHRG_CTRL2, ret);
-
-       /* Enable interrupts */
-       ret = regmap_update_bits(info->regmap,
-                               AXP20X_IRQ2_EN,
-                               BAT_IRQ_CFG_BAT_MASK, 1);
-       if (ret < 0)
-               dev_warn(&info->pdev->dev, "register(%x) write error(%d)\n",
-                                               AXP20X_IRQ2_EN, ret);
-
-       ret = regmap_update_bits(info->regmap, AXP20X_IRQ3_EN,
-                               TEMP_IRQ_CFG_MASK, 1);
-       if (ret < 0)
-               dev_warn(&info->pdev->dev, "register(%x) write error(%d)\n",
-                                               AXP20X_IRQ3_EN, ret);
+               return ret;
+       }
 
        /* Setup ending condition for charging to be 10% of I(chrg) */
        ret = regmap_update_bits(info->regmap,
                                AXP20X_CHRG_CTRL1,
                                CHRG_CCCV_ITERM_20P, 0);
-       if (ret < 0)
-               dev_warn(&info->pdev->dev, "register(%x) write error(%d)\n",
+       if (ret < 0) {
+               dev_err(&info->pdev->dev, "register(%x) write error(%d)\n",
                                                AXP20X_CHRG_CTRL1, ret);
+               return ret;
+       }
 
        /* Disable OCV-SOC curve calibration */
        ret = regmap_update_bits(info->regmap,
                                AXP20X_CC_CTRL,
                                FG_CNTL_OCV_ADJ_EN, 0);
-       if (ret < 0)
-               dev_warn(&info->pdev->dev, "register(%x) write error(%d)\n",
+       if (ret < 0) {
+               dev_err(&info->pdev->dev, "register(%x) write error(%d)\n",
                                                AXP20X_CC_CTRL, ret);
-
-       /* Init charging current and voltage */
-       info->max_cc = info->pdata->max_cc;
-       info->max_cv = info->pdata->max_cv;
+               return ret;
+       }
 
        /* Read current charge voltage and current limit */
        ret = regmap_read(info->regmap, AXP20X_CHRG_CTRL1, &val);
        if (ret < 0) {
-               /* Assume default if cannot read */
-               info->cc = info->pdata->def_cc;
-               info->cv = info->pdata->def_cv;
-       } else {
-               /* Determine charge voltage */
-               cv = (val & CHRG_CCCV_CV_MASK) >> CHRG_CCCV_CV_BIT_POS;
-               switch (cv) {
-               case CHRG_CCCV_CV_4100MV:
-                       info->cv = CV_4100MV;
-                       break;
-               case CHRG_CCCV_CV_4150MV:
-                       info->cv = CV_4150MV;
-                       break;
-               case CHRG_CCCV_CV_4200MV:
-                       info->cv = CV_4200MV;
-                       break;
-               case CHRG_CCCV_CV_4350MV:
-                       info->cv = CV_4350MV;
-                       break;
-               default:
-                       info->cv = INT_MAX;
-                       break;
-               }
+               dev_err(&info->pdev->dev, "register(%x) read error(%d)\n",
+                       AXP20X_CHRG_CTRL1, ret);
+               return ret;
+       }
 
-               /* Determine charge current limit */
-               cc = (ret & CHRG_CCCV_CC_MASK) >> CHRG_CCCV_CC_BIT_POS;
-               cc = (cc * CHRG_CCCV_CC_LSB_RES) + CHRG_CCCV_CC_OFFSET;
-               info->cc = cc;
+       /* Determine charge voltage */
+       cv = (val & CHRG_CCCV_CV_MASK) >> CHRG_CCCV_CV_BIT_POS;
+       switch (cv) {
+       case CHRG_CCCV_CV_4100MV:
+               info->cv = CV_4100MV;
+               break;
+       case CHRG_CCCV_CV_4150MV:
+               info->cv = CV_4150MV;
+               break;
+       case CHRG_CCCV_CV_4200MV:
+               info->cv = CV_4200MV;
+               break;
+       case CHRG_CCCV_CV_4350MV:
+               info->cv = CV_4350MV;
+               break;
+       }
 
-               /* Program default charging voltage and current */
-               cc = min(info->pdata->def_cc, info->max_cc);
-               cv = min(info->pdata->def_cv, info->max_cv);
+       /* Determine charge current limit */
+       cc = (ret & CHRG_CCCV_CC_MASK) >> CHRG_CCCV_CC_BIT_POS;
+       cc = (cc * CHRG_CCCV_CC_LSB_RES) + CHRG_CCCV_CC_OFFSET;
+       info->cc = cc;
 
-               ret = axp288_charger_set_cc(info, cc);
-               if (ret < 0)
-                       dev_warn(&info->pdev->dev,
-                                       "error(%d) in setting CC\n", ret);
+       /*
+        * Do not allow the user to configure higher settings then those
+        * set by the firmware
+        */
+       info->max_cv = info->cv;
+       info->max_cc = info->cc;
 
-               ret = axp288_charger_set_cv(info, cv);
-               if (ret < 0)
-                       dev_warn(&info->pdev->dev,
-                                       "error(%d) in setting CV\n", ret);
-       }
+       return 0;
 }
 
 static int axp288_charger_probe(struct platform_device *pdev)
 {
        int ret, i, pirq;
        struct axp288_chrg_info *info;
+       struct device *dev = &pdev->dev;
        struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
        struct power_supply_config charger_cfg = {};
-
        info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
@@ -821,15 +797,8 @@ static int axp288_charger_probe(struct platform_device *pdev)
        info->pdev = pdev;
        info->regmap = axp20x->regmap;
        info->regmap_irqc = axp20x->regmap_irqc;
-       info->pdata = pdev->dev.platform_data;
-
-       if (!info->pdata) {
-               /* Try ACPI provided pdata via device properties */
-               if (!device_property_present(&pdev->dev,
-                                               "axp288_charger_data\n"))
-                       dev_err(&pdev->dev, "failed to get platform data\n");
-               return -ENODEV;
-       }
+       info->cable.chg_type = -1;
+       info->is_charger_enabled = -1;
 
        info->cable.edev = extcon_get_extcon_dev(AXP288_EXTCON_DEV_NAME);
        if (info->cable.edev == NULL) {
@@ -838,63 +807,55 @@ static int axp288_charger_probe(struct platform_device *pdev)
                return -EPROBE_DEFER;
        }
 
-       /* Register for extcon notification */
-       INIT_WORK(&info->cable.work, axp288_charger_extcon_evt_worker);
-       info->cable.nb.notifier_call = axp288_charger_handle_cable_evt;
-       ret = extcon_register_notifier(info->cable.edev, EXTCON_CHG_USB_SDP,
-                                       &info->cable.nb);
-       if (ret) {
-               dev_err(&info->pdev->dev,
-                       "failed to register extcon notifier for SDP %d\n", ret);
-               return ret;
-       }
-
-       ret = extcon_register_notifier(info->cable.edev, EXTCON_CHG_USB_CDP,
-                                       &info->cable.nb);
-       if (ret) {
-               dev_err(&info->pdev->dev,
-                       "failed to register extcon notifier for CDP %d\n", ret);
-               extcon_unregister_notifier(info->cable.edev,
-                               EXTCON_CHG_USB_SDP, &info->cable.nb);
-               return ret;
-       }
-
-       ret = extcon_register_notifier(info->cable.edev, EXTCON_CHG_USB_DCP,
-                                       &info->cable.nb);
-       if (ret) {
-               dev_err(&info->pdev->dev,
-                       "failed to register extcon notifier for DCP %d\n", ret);
-               extcon_unregister_notifier(info->cable.edev,
-                               EXTCON_CHG_USB_SDP, &info->cable.nb);
-               extcon_unregister_notifier(info->cable.edev,
-                               EXTCON_CHG_USB_CDP, &info->cable.nb);
-               return ret;
+       info->otg.cable = extcon_get_extcon_dev(USB_HOST_EXTCON_DEV_NAME);
+       if (info->otg.cable == NULL) {
+               dev_dbg(dev, "EXTCON_USB_HOST is not ready, probe deferred\n");
+               return -EPROBE_DEFER;
        }
 
        platform_set_drvdata(pdev, info);
        mutex_init(&info->lock);
 
+       ret = charger_init_hw_regs(info);
+       if (ret)
+               return ret;
+
        /* Register with power supply class */
        charger_cfg.drv_data = info;
-       info->psy_usb = power_supply_register(&pdev->dev, &axp288_charger_desc,
-                                               &charger_cfg);
+       info->psy_usb = devm_power_supply_register(dev, &axp288_charger_desc,
+                                                  &charger_cfg);
        if (IS_ERR(info->psy_usb)) {
-               dev_err(&pdev->dev, "failed to register power supply charger\n");
                ret = PTR_ERR(info->psy_usb);
-               goto psy_reg_failed;
+               dev_err(dev, "failed to register power supply: %d\n", ret);
+               return ret;
+       }
+
+       /* Register for extcon notification */
+       INIT_WORK(&info->cable.work, axp288_charger_extcon_evt_worker);
+       info->cable.nb[0].notifier_call = axp288_charger_handle_cable0_evt;
+       info->cable.nb[1].notifier_call = axp288_charger_handle_cable1_evt;
+       info->cable.nb[2].notifier_call = axp288_charger_handle_cable2_evt;
+       for (i = 0; i < ARRAY_SIZE(cable_ids); i++) {
+               ret = devm_extcon_register_notifier(dev, info->cable.edev,
+                                         cable_ids[i], &info->cable.nb[i]);
+               if (ret) {
+                       dev_err(dev, "failed to register extcon notifier for %u: %d\n",
+                               cable_ids[i], ret);
+                       return ret;
+               }
        }
+       schedule_work(&info->cable.work);
 
        /* Register for OTG notification */
        INIT_WORK(&info->otg.work, axp288_charger_otg_evt_worker);
        info->otg.id_nb.notifier_call = axp288_charger_handle_otg_evt;
-       ret = extcon_register_notifier(info->otg.cable, EXTCON_USB_HOST,
-                                      &info->otg.id_nb);
-       if (ret)
-               dev_warn(&pdev->dev, "failed to register otg notifier\n");
-
-       if (info->otg.cable)
-               info->otg.id_short = extcon_get_cable_state_(
-                                       info->otg.cable, EXTCON_USB_HOST);
+       ret = devm_extcon_register_notifier(&pdev->dev, info->otg.cable,
+                                       EXTCON_USB_HOST, &info->otg.id_nb);
+       if (ret) {
+               dev_err(dev, "failed to register EXTCON_USB_HOST notifier\n");
+               return ret;
+       }
+       schedule_work(&info->otg.work);
 
        /* Register charger interrupts */
        for (i = 0; i < CHRG_INTR_END; i++) {
@@ -903,8 +864,7 @@ static int axp288_charger_probe(struct platform_device *pdev)
                if (info->irq[i] < 0) {
                        dev_warn(&info->pdev->dev,
                                "failed to get virtual interrupt=%d\n", pirq);
-                       ret = info->irq[i];
-                       goto intr_reg_failed;
+                       return info->irq[i];
                }
                ret = devm_request_threaded_irq(&info->pdev->dev, info->irq[i],
                                        NULL, axp288_charger_irq_thread_handler,
@@ -912,51 +872,22 @@ static int axp288_charger_probe(struct platform_device *pdev)
                if (ret) {
                        dev_err(&pdev->dev, "failed to request interrupt=%d\n",
                                                                info->irq[i]);
-                       goto intr_reg_failed;
+                       return ret;
                }
        }
 
-       charger_init_hw_regs(info);
-
        return 0;
-
-intr_reg_failed:
-       if (info->otg.cable)
-               extcon_unregister_notifier(info->otg.cable, EXTCON_USB_HOST,
-                                       &info->otg.id_nb);
-       power_supply_unregister(info->psy_usb);
-psy_reg_failed:
-       extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_SDP,
-                                       &info->cable.nb);
-       extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_CDP,
-                                       &info->cable.nb);
-       extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_DCP,
-                                       &info->cable.nb);
-       return ret;
 }
 
-static int axp288_charger_remove(struct platform_device *pdev)
-{
-       struct axp288_chrg_info *info =  dev_get_drvdata(&pdev->dev);
-
-       if (info->otg.cable)
-               extcon_unregister_notifier(info->otg.cable, EXTCON_USB_HOST,
-                                       &info->otg.id_nb);
-
-       extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_SDP,
-                                       &info->cable.nb);
-       extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_CDP,
-                                       &info->cable.nb);
-       extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_DCP,
-                                       &info->cable.nb);
-       power_supply_unregister(info->psy_usb);
-
-       return 0;
-}
+static const struct platform_device_id axp288_charger_id_table[] = {
+       { .name = "axp288_charger" },
+       {},
+};
+MODULE_DEVICE_TABLE(platform, axp288_charger_id_table);
 
 static struct platform_driver axp288_charger_driver = {
        .probe = axp288_charger_probe,
-       .remove = axp288_charger_remove,
+       .id_table = axp288_charger_id_table,
        .driver = {
                .name = "axp288_charger",
        },
index 539eb41504bb8140c91f1b76991d59f00ba70bdd..a8dcabc32721df7f6064f33b1940ac2a0182258c 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/iio/consumer.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
+#include <asm/unaligned.h>
 
 #define CHRG_STAT_BAT_SAFE_MODE                (1 << 3)
 #define CHRG_STAT_BAT_VALID                    (1 << 4)
 #define CHRG_CCCV_CV_4350MV                    0x3     /* 4.35V */
 #define CHRG_CCCV_CHG_EN                       (1 << 7)
 
-#define CV_4100                                                4100    /* 4100mV */
-#define CV_4150                                                4150    /* 4150mV */
-#define CV_4200                                                4200    /* 4200mV */
-#define CV_4350                                                4350    /* 4350mV */
-
-#define TEMP_IRQ_CFG_QWBTU                     (1 << 0)
-#define TEMP_IRQ_CFG_WBTU                      (1 << 1)
-#define TEMP_IRQ_CFG_QWBTO                     (1 << 2)
-#define TEMP_IRQ_CFG_WBTO                      (1 << 3)
-#define TEMP_IRQ_CFG_MASK                      0xf
-
-#define FG_IRQ_CFG_LOWBATT_WL2         (1 << 0)
-#define FG_IRQ_CFG_LOWBATT_WL1         (1 << 1)
-#define FG_IRQ_CFG_LOWBATT_MASK                0x3
-#define LOWBAT_IRQ_STAT_LOWBATT_WL2    (1 << 0)
-#define LOWBAT_IRQ_STAT_LOWBATT_WL1    (1 << 1)
-
 #define FG_CNTL_OCV_ADJ_STAT           (1 << 2)
 #define FG_CNTL_OCV_ADJ_EN                     (1 << 3)
 #define FG_CNTL_CAP_ADJ_STAT           (1 << 4)
 #define FG_CNTL_CC_EN                          (1 << 6)
 #define FG_CNTL_GAUGE_EN                       (1 << 7)
 
+#define FG_15BIT_WORD_VALID                    (1 << 15)
+#define FG_15BIT_VAL_MASK                      0x7fff
+
 #define FG_REP_CAP_VALID                       (1 << 7)
 #define FG_REP_CAP_VAL_MASK                    0x7F
 
 #define FG_DES_CAP1_VALID                      (1 << 7)
-#define FG_DES_CAP1_VAL_MASK           0x7F
-#define FG_DES_CAP0_VAL_MASK           0xFF
 #define FG_DES_CAP_RES_LSB                     1456    /* 1.456mAhr */
 
-#define FG_CC_MTR1_VALID                       (1 << 7)
-#define FG_CC_MTR1_VAL_MASK                    0x7F
-#define FG_CC_MTR0_VAL_MASK                    0xFF
 #define FG_DES_CC_RES_LSB                      1456    /* 1.456mAhr */
 
 #define FG_OCV_CAP_VALID                       (1 << 7)
 
 /* 1.1mV per LSB expressed in uV */
 #define VOLTAGE_FROM_ADC(a)                    ((a * 11) / 10)
-/* properties converted to tenths of degrees, uV, uA, uW */
-#define PROP_TEMP(a)           ((a) * 10)
-#define UNPROP_TEMP(a)         ((a) / 10)
+/* properties converted to uV, uA */
 #define PROP_VOLT(a)           ((a) * 1000)
 #define PROP_CURR(a)           ((a) * 1000)
 
@@ -122,13 +102,13 @@ enum {
 
 struct axp288_fg_info {
        struct platform_device *pdev;
-       struct axp20x_fg_pdata *pdata;
        struct regmap *regmap;
        struct regmap_irq_chip_data *regmap_irqc;
        int irq[AXP288_FG_INTR_NUM];
        struct power_supply *bat;
        struct mutex lock;
        int status;
+       int max_volt;
        struct delayed_work status_monitor;
        struct dentry *debug_file;
 };
@@ -138,22 +118,14 @@ static enum power_supply_property fuel_gauge_props[] = {
        POWER_SUPPLY_PROP_PRESENT,
        POWER_SUPPLY_PROP_HEALTH,
        POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
-       POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
        POWER_SUPPLY_PROP_VOLTAGE_NOW,
        POWER_SUPPLY_PROP_VOLTAGE_OCV,
        POWER_SUPPLY_PROP_CURRENT_NOW,
        POWER_SUPPLY_PROP_CAPACITY,
        POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN,
-       POWER_SUPPLY_PROP_TEMP,
-       POWER_SUPPLY_PROP_TEMP_MAX,
-       POWER_SUPPLY_PROP_TEMP_MIN,
-       POWER_SUPPLY_PROP_TEMP_ALERT_MIN,
-       POWER_SUPPLY_PROP_TEMP_ALERT_MAX,
        POWER_SUPPLY_PROP_TECHNOLOGY,
        POWER_SUPPLY_PROP_CHARGE_FULL,
        POWER_SUPPLY_PROP_CHARGE_NOW,
-       POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
-       POWER_SUPPLY_PROP_MODEL_NAME,
 };
 
 static int fuel_gauge_reg_readb(struct axp288_fg_info *info, int reg)
@@ -169,8 +141,10 @@ static int fuel_gauge_reg_readb(struct axp288_fg_info *info, int reg)
                        break;
        }
 
-       if (ret < 0)
+       if (ret < 0) {
                dev_err(&info->pdev->dev, "axp288 reg read err:%d\n", ret);
+               return ret;
+       }
 
        return val;
 }
@@ -187,6 +161,44 @@ static int fuel_gauge_reg_writeb(struct axp288_fg_info *info, int reg, u8 val)
        return ret;
 }
 
+static int fuel_gauge_read_15bit_word(struct axp288_fg_info *info, int reg)
+{
+       unsigned char buf[2];
+       int ret;
+
+       ret = regmap_bulk_read(info->regmap, reg, buf, 2);
+       if (ret < 0) {
+               dev_err(&info->pdev->dev, "Error reading reg 0x%02x err: %d\n",
+                       reg, ret);
+               return ret;
+       }
+
+       ret = get_unaligned_be16(buf);
+       if (!(ret & FG_15BIT_WORD_VALID)) {
+               dev_err(&info->pdev->dev, "Error reg 0x%02x contents not valid\n",
+                       reg);
+               return -ENXIO;
+       }
+
+       return ret & FG_15BIT_VAL_MASK;
+}
+
+static int fuel_gauge_read_12bit_word(struct axp288_fg_info *info, int reg)
+{
+       unsigned char buf[2];
+       int ret;
+
+       ret = regmap_bulk_read(info->regmap, reg, buf, 2);
+       if (ret < 0) {
+               dev_err(&info->pdev->dev, "Error reading reg 0x%02x err: %d\n",
+                       reg, ret);
+               return ret;
+       }
+
+       /* 12-bit data values have upper 8 bits in buf[0], lower 4 in buf[1] */
+       return (buf[0] << 4) | ((buf[1] >> 4) & 0x0f);
+}
+
 static int pmic_read_adc_val(const char *name, int *raw_val,
                struct axp288_fg_info *info)
 {
@@ -247,24 +259,15 @@ static int fuel_gauge_debug_show(struct seq_file *s, void *data)
        seq_printf(s, "    FG_RDC0[%02x] : %02x\n",
                AXP288_FG_RDC0_REG,
                fuel_gauge_reg_readb(info, AXP288_FG_RDC0_REG));
-       seq_printf(s, "    FG_OCVH[%02x] : %02x\n",
+       seq_printf(s, "     FG_OCV[%02x] : %04x\n",
                AXP288_FG_OCVH_REG,
-               fuel_gauge_reg_readb(info, AXP288_FG_OCVH_REG));
-       seq_printf(s, "    FG_OCVL[%02x] : %02x\n",
-               AXP288_FG_OCVL_REG,
-               fuel_gauge_reg_readb(info, AXP288_FG_OCVL_REG));
-       seq_printf(s, "FG_DES_CAP1[%02x] : %02x\n",
+               fuel_gauge_read_12bit_word(info, AXP288_FG_OCVH_REG));
+       seq_printf(s, " FG_DES_CAP[%02x] : %04x\n",
                AXP288_FG_DES_CAP1_REG,
-               fuel_gauge_reg_readb(info, AXP288_FG_DES_CAP1_REG));
-       seq_printf(s, "FG_DES_CAP0[%02x] : %02x\n",
-               AXP288_FG_DES_CAP0_REG,
-               fuel_gauge_reg_readb(info, AXP288_FG_DES_CAP0_REG));
-       seq_printf(s, " FG_CC_MTR1[%02x] : %02x\n",
+               fuel_gauge_read_15bit_word(info, AXP288_FG_DES_CAP1_REG));
+       seq_printf(s, "  FG_CC_MTR[%02x] : %04x\n",
                AXP288_FG_CC_MTR1_REG,
-               fuel_gauge_reg_readb(info, AXP288_FG_CC_MTR1_REG));
-       seq_printf(s, " FG_CC_MTR0[%02x] : %02x\n",
-               AXP288_FG_CC_MTR0_REG,
-               fuel_gauge_reg_readb(info, AXP288_FG_CC_MTR0_REG));
+               fuel_gauge_read_15bit_word(info, AXP288_FG_CC_MTR1_REG));
        seq_printf(s, " FG_OCV_CAP[%02x] : %02x\n",
                AXP288_FG_OCV_CAP_REG,
                fuel_gauge_reg_readb(info, AXP288_FG_OCV_CAP_REG));
@@ -417,143 +420,27 @@ current_read_fail:
        return ret;
 }
 
-static int temp_to_adc(struct axp288_fg_info *info, int tval)
-{
-       int rntc = 0, i, ret, adc_val;
-       int rmin, rmax, tmin, tmax;
-       int tcsz = info->pdata->tcsz;
-
-       /* get the Rntc resitance value for this temp */
-       if (tval > info->pdata->thermistor_curve[0][1]) {
-               rntc = info->pdata->thermistor_curve[0][0];
-       } else if (tval <= info->pdata->thermistor_curve[tcsz-1][1]) {
-               rntc = info->pdata->thermistor_curve[tcsz-1][0];
-       } else {
-               for (i = 1; i < tcsz; i++) {
-                       if (tval > info->pdata->thermistor_curve[i][1]) {
-                               rmin = info->pdata->thermistor_curve[i-1][0];
-                               rmax = info->pdata->thermistor_curve[i][0];
-                               tmin = info->pdata->thermistor_curve[i-1][1];
-                               tmax = info->pdata->thermistor_curve[i][1];
-                               rntc = rmin + ((rmax - rmin) *
-                                       (tval - tmin) / (tmax - tmin));
-                               break;
-                       }
-               }
-       }
-
-       /* we need the current to calculate the proper adc voltage */
-       ret = fuel_gauge_reg_readb(info, AXP20X_ADC_RATE);
-       if (ret < 0) {
-               dev_err(&info->pdev->dev, "%s:read err:%d\n", __func__, ret);
-               ret = 0x30;
-       }
-
-       /*
-        * temperature is proportional to NTS thermistor resistance
-        * ADC_RATE[5-4] determines current, 00=20uA,01=40uA,10=60uA,11=80uA
-        * [12-bit ADC VAL] = R_NTC(Ω) * current / 800
-        */
-       adc_val = rntc * (20 + (20 * ((ret >> 4) & 0x3))) / 800;
-
-       return adc_val;
-}
-
-static int adc_to_temp(struct axp288_fg_info *info, int adc_val)
-{
-       int ret, r, i, tval = 0;
-       int rmin, rmax, tmin, tmax;
-       int tcsz = info->pdata->tcsz;
-
-       ret = fuel_gauge_reg_readb(info, AXP20X_ADC_RATE);
-       if (ret < 0) {
-               dev_err(&info->pdev->dev, "%s:read err:%d\n", __func__, ret);
-               ret = 0x30;
-       }
-
-       /*
-        * temperature is proportional to NTS thermistor resistance
-        * ADC_RATE[5-4] determines current, 00=20uA,01=40uA,10=60uA,11=80uA
-        * R_NTC(Ω) = [12-bit ADC VAL] * 800 / current
-        */
-       r = adc_val * 800 / (20 + (20 * ((ret >> 4) & 0x3)));
-
-       if (r < info->pdata->thermistor_curve[0][0]) {
-               tval = info->pdata->thermistor_curve[0][1];
-       } else if (r >= info->pdata->thermistor_curve[tcsz-1][0]) {
-               tval = info->pdata->thermistor_curve[tcsz-1][1];
-       } else {
-               for (i = 1; i < tcsz; i++) {
-                       if (r < info->pdata->thermistor_curve[i][0]) {
-                               rmin = info->pdata->thermistor_curve[i-1][0];
-                               rmax = info->pdata->thermistor_curve[i][0];
-                               tmin = info->pdata->thermistor_curve[i-1][1];
-                               tmax = info->pdata->thermistor_curve[i][1];
-                               tval = tmin + ((tmax - tmin) *
-                                       (r - rmin) / (rmax - rmin));
-                               break;
-                       }
-               }
-       }
-
-       return tval;
-}
-
-static int fuel_gauge_get_btemp(struct axp288_fg_info *info, int *btemp)
-{
-       int ret, raw_val = 0;
-
-       ret = pmic_read_adc_val("axp288-batt-temp", &raw_val, info);
-       if (ret < 0)
-               goto temp_read_fail;
-
-       *btemp = adc_to_temp(info, raw_val);
-
-temp_read_fail:
-       return ret;
-}
-
 static int fuel_gauge_get_vocv(struct axp288_fg_info *info, int *vocv)
 {
-       int ret, value;
-
-       /* 12-bit data value, upper 8 in OCVH, lower 4 in OCVL */
-       ret = fuel_gauge_reg_readb(info, AXP288_FG_OCVH_REG);
-       if (ret < 0)
-               goto vocv_read_fail;
-       value = ret << 4;
+       int ret;
 
-       ret = fuel_gauge_reg_readb(info, AXP288_FG_OCVL_REG);
-       if (ret < 0)
-               goto vocv_read_fail;
-       value |= (ret & 0xf);
+       ret = fuel_gauge_read_12bit_word(info, AXP288_FG_OCVH_REG);
+       if (ret >= 0)
+               *vocv = VOLTAGE_FROM_ADC(ret);
 
-       *vocv = VOLTAGE_FROM_ADC(value);
-vocv_read_fail:
        return ret;
 }
 
 static int fuel_gauge_battery_health(struct axp288_fg_info *info)
 {
-       int temp, vocv;
-       int ret, health = POWER_SUPPLY_HEALTH_UNKNOWN;
-
-       ret = fuel_gauge_get_btemp(info, &temp);
-       if (ret < 0)
-               goto health_read_fail;
+       int ret, vocv, health = POWER_SUPPLY_HEALTH_UNKNOWN;
 
        ret = fuel_gauge_get_vocv(info, &vocv);
        if (ret < 0)
                goto health_read_fail;
 
-       if (vocv > info->pdata->max_volt)
+       if (vocv > info->max_volt)
                health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
-       else if (temp > info->pdata->max_temp)
-               health = POWER_SUPPLY_HEALTH_OVERHEAT;
-       else if (temp < info->pdata->min_temp)
-               health = POWER_SUPPLY_HEALTH_COLD;
-       else if (vocv < info->pdata->min_volt)
-               health = POWER_SUPPLY_HEALTH_DEAD;
        else
                health = POWER_SUPPLY_HEALTH_GOOD;
 
@@ -561,28 +448,6 @@ health_read_fail:
        return health;
 }
 
-static int fuel_gauge_set_high_btemp_alert(struct axp288_fg_info *info)
-{
-       int ret, adc_val;
-
-       /* program temperature threshold as 1/16 ADC value */
-       adc_val = temp_to_adc(info, info->pdata->max_temp);
-       ret = fuel_gauge_reg_writeb(info, AXP20X_V_HTF_DISCHRG, adc_val >> 4);
-
-       return ret;
-}
-
-static int fuel_gauge_set_low_btemp_alert(struct axp288_fg_info *info)
-{
-       int ret, adc_val;
-
-       /* program temperature threshold as 1/16 ADC value */
-       adc_val = temp_to_adc(info, info->pdata->min_temp);
-       ret = fuel_gauge_reg_writeb(info, AXP20X_V_LTF_DISCHRG, adc_val >> 4);
-
-       return ret;
-}
-
 static int fuel_gauge_get_property(struct power_supply *ps,
                enum power_supply_property prop,
                union power_supply_propval *val)
@@ -643,58 +508,25 @@ static int fuel_gauge_get_property(struct power_supply *ps,
                        goto fuel_gauge_read_err;
                val->intval = (ret & 0x0f);
                break;
-       case POWER_SUPPLY_PROP_TEMP:
-               ret = fuel_gauge_get_btemp(info, &value);
-               if (ret < 0)
-                       goto fuel_gauge_read_err;
-               val->intval = PROP_TEMP(value);
-               break;
-       case POWER_SUPPLY_PROP_TEMP_MAX:
-       case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
-               val->intval = PROP_TEMP(info->pdata->max_temp);
-               break;
-       case POWER_SUPPLY_PROP_TEMP_MIN:
-       case POWER_SUPPLY_PROP_TEMP_ALERT_MIN:
-               val->intval = PROP_TEMP(info->pdata->min_temp);
-               break;
        case POWER_SUPPLY_PROP_TECHNOLOGY:
                val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
                break;
        case POWER_SUPPLY_PROP_CHARGE_NOW:
-               ret = fuel_gauge_reg_readb(info, AXP288_FG_CC_MTR1_REG);
+               ret = fuel_gauge_read_15bit_word(info, AXP288_FG_CC_MTR1_REG);
                if (ret < 0)
                        goto fuel_gauge_read_err;
 
-               value = (ret & FG_CC_MTR1_VAL_MASK) << 8;
-               ret = fuel_gauge_reg_readb(info, AXP288_FG_CC_MTR0_REG);
-               if (ret < 0)
-                       goto fuel_gauge_read_err;
-               value |= (ret & FG_CC_MTR0_VAL_MASK);
-               val->intval = value * FG_DES_CAP_RES_LSB;
+               val->intval = ret * FG_DES_CAP_RES_LSB;
                break;
        case POWER_SUPPLY_PROP_CHARGE_FULL:
-               ret = fuel_gauge_reg_readb(info, AXP288_FG_DES_CAP1_REG);
+               ret = fuel_gauge_read_15bit_word(info, AXP288_FG_DES_CAP1_REG);
                if (ret < 0)
                        goto fuel_gauge_read_err;
 
-               value = (ret & FG_DES_CAP1_VAL_MASK) << 8;
-               ret = fuel_gauge_reg_readb(info, AXP288_FG_DES_CAP0_REG);
-               if (ret < 0)
-                       goto fuel_gauge_read_err;
-               value |= (ret & FG_DES_CAP0_VAL_MASK);
-               val->intval = value * FG_DES_CAP_RES_LSB;
-               break;
-       case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
-               val->intval = PROP_CURR(info->pdata->design_cap);
+               val->intval = ret * FG_DES_CAP_RES_LSB;
                break;
        case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
-               val->intval = PROP_VOLT(info->pdata->max_volt);
-               break;
-       case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
-               val->intval = PROP_VOLT(info->pdata->min_volt);
-               break;
-       case POWER_SUPPLY_PROP_MODEL_NAME:
-               val->strval = info->pdata->battid;
+               val->intval = PROP_VOLT(info->max_volt);
                break;
        default:
                mutex_unlock(&info->lock);
@@ -718,35 +550,6 @@ static int fuel_gauge_set_property(struct power_supply *ps,
 
        mutex_lock(&info->lock);
        switch (prop) {
-       case POWER_SUPPLY_PROP_STATUS:
-               info->status = val->intval;
-               break;
-       case POWER_SUPPLY_PROP_TEMP_MIN:
-       case POWER_SUPPLY_PROP_TEMP_ALERT_MIN:
-               if ((val->intval < PD_DEF_MIN_TEMP) ||
-                       (val->intval > PD_DEF_MAX_TEMP)) {
-                       ret = -EINVAL;
-                       break;
-               }
-               info->pdata->min_temp = UNPROP_TEMP(val->intval);
-               ret = fuel_gauge_set_low_btemp_alert(info);
-               if (ret < 0)
-                       dev_err(&info->pdev->dev,
-                               "temp alert min set fail:%d\n", ret);
-               break;
-       case POWER_SUPPLY_PROP_TEMP_MAX:
-       case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
-               if ((val->intval < PD_DEF_MIN_TEMP) ||
-                       (val->intval > PD_DEF_MAX_TEMP)) {
-                       ret = -EINVAL;
-                       break;
-               }
-               info->pdata->max_temp = UNPROP_TEMP(val->intval);
-               ret = fuel_gauge_set_high_btemp_alert(info);
-               if (ret < 0)
-                       dev_err(&info->pdev->dev,
-                               "temp alert max set fail:%d\n", ret);
-               break;
        case POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN:
                if ((val->intval < 0) || (val->intval > 15)) {
                        ret = -EINVAL;
@@ -774,11 +577,6 @@ static int fuel_gauge_property_is_writeable(struct power_supply *psy,
        int ret;
 
        switch (psp) {
-       case POWER_SUPPLY_PROP_STATUS:
-       case POWER_SUPPLY_PROP_TEMP_MIN:
-       case POWER_SUPPLY_PROP_TEMP_ALERT_MIN:
-       case POWER_SUPPLY_PROP_TEMP_MAX:
-       case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
        case POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN:
                ret = 1;
                break;
@@ -863,158 +661,6 @@ static const struct power_supply_desc fuel_gauge_desc = {
        .external_power_changed = fuel_gauge_external_power_changed,
 };
 
-static int fuel_gauge_set_lowbatt_thresholds(struct axp288_fg_info *info)
-{
-       int ret;
-       u8 reg_val;
-
-       ret = fuel_gauge_reg_readb(info, AXP20X_FG_RES);
-       if (ret < 0) {
-               dev_err(&info->pdev->dev, "%s:read err:%d\n", __func__, ret);
-               return ret;
-       }
-       ret = (ret & FG_REP_CAP_VAL_MASK);
-
-       if (ret > FG_LOW_CAP_WARN_THR)
-               reg_val = FG_LOW_CAP_WARN_THR;
-       else if (ret > FG_LOW_CAP_CRIT_THR)
-               reg_val = FG_LOW_CAP_CRIT_THR;
-       else
-               reg_val = FG_LOW_CAP_SHDN_THR;
-
-       reg_val |= FG_LOW_CAP_THR1_VAL;
-       ret = fuel_gauge_reg_writeb(info, AXP288_FG_LOW_CAP_REG, reg_val);
-       if (ret < 0)
-               dev_err(&info->pdev->dev, "%s:write err:%d\n", __func__, ret);
-
-       return ret;
-}
-
-static int fuel_gauge_program_vbatt_full(struct axp288_fg_info *info)
-{
-       int ret;
-       u8 val;
-
-       ret = fuel_gauge_reg_readb(info, AXP20X_CHRG_CTRL1);
-       if (ret < 0)
-               goto fg_prog_ocv_fail;
-       else
-               val = (ret & ~CHRG_CCCV_CV_MASK);
-
-       switch (info->pdata->max_volt) {
-       case CV_4100:
-               val |= (CHRG_CCCV_CV_4100MV << CHRG_CCCV_CV_BIT_POS);
-               break;
-       case CV_4150:
-               val |= (CHRG_CCCV_CV_4150MV << CHRG_CCCV_CV_BIT_POS);
-               break;
-       case CV_4200:
-               val |= (CHRG_CCCV_CV_4200MV << CHRG_CCCV_CV_BIT_POS);
-               break;
-       case CV_4350:
-               val |= (CHRG_CCCV_CV_4350MV << CHRG_CCCV_CV_BIT_POS);
-               break;
-       default:
-               val |= (CHRG_CCCV_CV_4200MV << CHRG_CCCV_CV_BIT_POS);
-               break;
-       }
-
-       ret = fuel_gauge_reg_writeb(info, AXP20X_CHRG_CTRL1, val);
-fg_prog_ocv_fail:
-       return ret;
-}
-
-static int fuel_gauge_program_design_cap(struct axp288_fg_info *info)
-{
-       int ret;
-
-       ret = fuel_gauge_reg_writeb(info,
-               AXP288_FG_DES_CAP1_REG, info->pdata->cap1);
-       if (ret < 0)
-               goto fg_prog_descap_fail;
-
-       ret = fuel_gauge_reg_writeb(info,
-               AXP288_FG_DES_CAP0_REG, info->pdata->cap0);
-
-fg_prog_descap_fail:
-       return ret;
-}
-
-static int fuel_gauge_program_ocv_curve(struct axp288_fg_info *info)
-{
-       int ret = 0, i;
-
-       for (i = 0; i < OCV_CURVE_SIZE; i++) {
-               ret = fuel_gauge_reg_writeb(info,
-                       AXP288_FG_OCV_CURVE_REG + i, info->pdata->ocv_curve[i]);
-               if (ret < 0)
-                       goto fg_prog_ocv_fail;
-       }
-
-fg_prog_ocv_fail:
-       return ret;
-}
-
-static int fuel_gauge_program_rdc_vals(struct axp288_fg_info *info)
-{
-       int ret;
-
-       ret = fuel_gauge_reg_writeb(info,
-               AXP288_FG_RDC1_REG, info->pdata->rdc1);
-       if (ret < 0)
-               goto fg_prog_ocv_fail;
-
-       ret = fuel_gauge_reg_writeb(info,
-               AXP288_FG_RDC0_REG, info->pdata->rdc0);
-
-fg_prog_ocv_fail:
-       return ret;
-}
-
-static void fuel_gauge_init_config_regs(struct axp288_fg_info *info)
-{
-       int ret;
-
-       /*
-        * check if the config data is already
-        * programmed and if so just return.
-        */
-
-       ret = fuel_gauge_reg_readb(info, AXP288_FG_DES_CAP1_REG);
-       if (ret < 0) {
-               dev_warn(&info->pdev->dev, "CAP1 reg read err!!\n");
-       } else if (!(ret & FG_DES_CAP1_VALID)) {
-               dev_info(&info->pdev->dev, "FG data needs to be initialized\n");
-       } else {
-               dev_info(&info->pdev->dev, "FG data is already initialized\n");
-               return;
-       }
-
-       ret = fuel_gauge_program_vbatt_full(info);
-       if (ret < 0)
-               dev_err(&info->pdev->dev, "set vbatt full fail:%d\n", ret);
-
-       ret = fuel_gauge_program_design_cap(info);
-       if (ret < 0)
-               dev_err(&info->pdev->dev, "set design cap fail:%d\n", ret);
-
-       ret = fuel_gauge_program_rdc_vals(info);
-       if (ret < 0)
-               dev_err(&info->pdev->dev, "set rdc fail:%d\n", ret);
-
-       ret = fuel_gauge_program_ocv_curve(info);
-       if (ret < 0)
-               dev_err(&info->pdev->dev, "set ocv curve fail:%d\n", ret);
-
-       ret = fuel_gauge_set_lowbatt_thresholds(info);
-       if (ret < 0)
-               dev_err(&info->pdev->dev, "lowbatt thr set fail:%d\n", ret);
-
-       ret = fuel_gauge_reg_writeb(info, AXP20X_CC_CTRL, 0xef);
-       if (ret < 0)
-               dev_err(&info->pdev->dev, "gauge cntl set fail:%d\n", ret);
-}
-
 static void fuel_gauge_init_irq(struct axp288_fg_info *info)
 {
        int ret, i, pirq;
@@ -1052,29 +698,6 @@ intr_failed:
        }
 }
 
-static void fuel_gauge_init_hw_regs(struct axp288_fg_info *info)
-{
-       int ret;
-       unsigned int val;
-
-       ret = fuel_gauge_set_high_btemp_alert(info);
-       if (ret < 0)
-               dev_err(&info->pdev->dev, "high batt temp set fail:%d\n", ret);
-
-       ret = fuel_gauge_set_low_btemp_alert(info);
-       if (ret < 0)
-               dev_err(&info->pdev->dev, "low batt temp set fail:%d\n", ret);
-
-       /* enable interrupts */
-       val = fuel_gauge_reg_readb(info, AXP20X_IRQ3_EN);
-       val |= TEMP_IRQ_CFG_MASK;
-       fuel_gauge_reg_writeb(info, AXP20X_IRQ3_EN, val);
-
-       val = fuel_gauge_reg_readb(info, AXP20X_IRQ4_EN);
-       val |= FG_IRQ_CFG_LOWBATT_MASK;
-       val = fuel_gauge_reg_writeb(info, AXP20X_IRQ4_EN, val);
-}
-
 static int axp288_fuel_gauge_probe(struct platform_device *pdev)
 {
        int ret = 0;
@@ -1090,15 +713,39 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
        info->regmap = axp20x->regmap;
        info->regmap_irqc = axp20x->regmap_irqc;
        info->status = POWER_SUPPLY_STATUS_UNKNOWN;
-       info->pdata = pdev->dev.platform_data;
-       if (!info->pdata)
-               return -ENODEV;
 
        platform_set_drvdata(pdev, info);
 
        mutex_init(&info->lock);
        INIT_DELAYED_WORK(&info->status_monitor, fuel_gauge_status_monitor);
 
+       ret = fuel_gauge_reg_readb(info, AXP288_FG_DES_CAP1_REG);
+       if (ret < 0)
+               return ret;
+
+       if (!(ret & FG_DES_CAP1_VALID)) {
+               dev_err(&pdev->dev, "axp288 not configured by firmware\n");
+               return -ENODEV;
+       }
+
+       ret = fuel_gauge_reg_readb(info, AXP20X_CHRG_CTRL1);
+       if (ret < 0)
+               return ret;
+       switch ((ret & CHRG_CCCV_CV_MASK) >> CHRG_CCCV_CV_BIT_POS) {
+       case CHRG_CCCV_CV_4100MV:
+               info->max_volt = 4100;
+               break;
+       case CHRG_CCCV_CV_4150MV:
+               info->max_volt = 4150;
+               break;
+       case CHRG_CCCV_CV_4200MV:
+               info->max_volt = 4200;
+               break;
+       case CHRG_CCCV_CV_4350MV:
+               info->max_volt = 4350;
+               break;
+       }
+
        psy_cfg.drv_data = info;
        info->bat = power_supply_register(&pdev->dev, &fuel_gauge_desc, &psy_cfg);
        if (IS_ERR(info->bat)) {
@@ -1108,12 +755,10 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
        }
 
        fuel_gauge_create_debugfs(info);
-       fuel_gauge_init_config_regs(info);
        fuel_gauge_init_irq(info);
-       fuel_gauge_init_hw_regs(info);
        schedule_delayed_work(&info->status_monitor, STATUS_MON_DELAY_JIFFIES);
 
-       return ret;
+       return 0;
 }
 
 static const struct platform_device_id axp288_fg_id_table[] = {
index 73e2f0b79dd4f9c67c8c4f70a5d596f46bfe4cb7..c4770a94cc8e66a744e5ea4639873e2249e32d5e 100644 (file)
@@ -1569,6 +1569,11 @@ static int bq2415x_probe(struct i2c_client *client,
                acpi_id =
                        acpi_match_device(client->dev.driver->acpi_match_table,
                                          &client->dev);
+               if (!acpi_id) {
+                       dev_err(&client->dev, "failed to match device name\n");
+                       ret = -ENODEV;
+                       goto error_1;
+               }
                name = kasprintf(GFP_KERNEL, "%s-%d", acpi_id->id, num);
        }
        if (!name) {
index e9584330aeed27f0c4e2cc7fa731ff8311e82659..a4f08492abebfb1769847db73b1b4f4c546bf6b5 100644 (file)
  * so the first read after a fault returns the latched value and subsequent
  * reads return the current value.  In order to return the fault status
  * to the user, have the interrupt handler save the reg's value and retrieve
- * it in the appropriate health/status routine.  Each routine has its own
- * flag indicating whether it should use the value stored by the last run
- * of the interrupt handler or do an actual reg read.  That way each routine
- * can report back whatever fault may have occured.
+ * it in the appropriate health/status routine.
  */
 struct bq24190_dev_info {
        struct i2c_client               *client;
@@ -159,10 +156,6 @@ struct bq24190_dev_info {
        unsigned int                    gpio_int;
        unsigned int                    irq;
        struct mutex                    f_reg_lock;
-       bool                            first_time;
-       bool                            charger_health_valid;
-       bool                            battery_health_valid;
-       bool                            battery_status_valid;
        u8                              f_reg;
        u8                              ss_reg;
        u8                              watchdog;
@@ -199,7 +192,7 @@ static const int bq24190_cvc_vreg_values[] = {
        4400000
 };
 
-/* REG06[1:0] (TREG) in tenths of degrees Celcius */
+/* REG06[1:0] (TREG) in tenths of degrees Celsius */
 static const int bq24190_ictrc_treg_values[] = {
        600, 800, 1000, 1200
 };
@@ -636,21 +629,11 @@ static int bq24190_charger_get_health(struct bq24190_dev_info *bdi,
                union power_supply_propval *val)
 {
        u8 v;
-       int health, ret;
+       int health;
 
        mutex_lock(&bdi->f_reg_lock);
-
-       if (bdi->charger_health_valid) {
-               v = bdi->f_reg;
-               bdi->charger_health_valid = false;
-               mutex_unlock(&bdi->f_reg_lock);
-       } else {
-               mutex_unlock(&bdi->f_reg_lock);
-
-               ret = bq24190_read(bdi, BQ24190_REG_F, &v);
-               if (ret < 0)
-                       return ret;
-       }
+       v = bdi->f_reg;
+       mutex_unlock(&bdi->f_reg_lock);
 
        if (v & BQ24190_REG_F_BOOST_FAULT_MASK) {
                /*
@@ -937,18 +920,8 @@ static int bq24190_battery_get_status(struct bq24190_dev_info *bdi,
        int status, ret;
 
        mutex_lock(&bdi->f_reg_lock);
-
-       if (bdi->battery_status_valid) {
-               chrg_fault = bdi->f_reg;
-               bdi->battery_status_valid = false;
-               mutex_unlock(&bdi->f_reg_lock);
-       } else {
-               mutex_unlock(&bdi->f_reg_lock);
-
-               ret = bq24190_read(bdi, BQ24190_REG_F, &chrg_fault);
-               if (ret < 0)
-                       return ret;
-       }
+       chrg_fault = bdi->f_reg;
+       mutex_unlock(&bdi->f_reg_lock);
 
        chrg_fault &= BQ24190_REG_F_CHRG_FAULT_MASK;
        chrg_fault >>= BQ24190_REG_F_CHRG_FAULT_SHIFT;
@@ -996,21 +969,11 @@ static int bq24190_battery_get_health(struct bq24190_dev_info *bdi,
                union power_supply_propval *val)
 {
        u8 v;
-       int health, ret;
+       int health;
 
        mutex_lock(&bdi->f_reg_lock);
-
-       if (bdi->battery_health_valid) {
-               v = bdi->f_reg;
-               bdi->battery_health_valid = false;
-               mutex_unlock(&bdi->f_reg_lock);
-       } else {
-               mutex_unlock(&bdi->f_reg_lock);
-
-               ret = bq24190_read(bdi, BQ24190_REG_F, &v);
-               if (ret < 0)
-                       return ret;
-       }
+       v = bdi->f_reg;
+       mutex_unlock(&bdi->f_reg_lock);
 
        if (v & BQ24190_REG_F_BAT_FAULT_MASK) {
                health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
@@ -1197,9 +1160,12 @@ static const struct power_supply_desc bq24190_battery_desc = {
 static irqreturn_t bq24190_irq_handler_thread(int irq, void *data)
 {
        struct bq24190_dev_info *bdi = data;
-       bool alert_userspace = false;
+       const u8 battery_mask_ss = BQ24190_REG_SS_CHRG_STAT_MASK;
+       const u8 battery_mask_f = BQ24190_REG_F_BAT_FAULT_MASK
+                               | BQ24190_REG_F_NTC_FAULT_MASK;
+       bool alert_charger = false, alert_battery = false;
        u8 ss_reg = 0, f_reg = 0;
-       int ret;
+       int i, ret;
 
        pm_runtime_get_sync(bdi->dev);
 
@@ -1209,6 +1175,32 @@ static irqreturn_t bq24190_irq_handler_thread(int irq, void *data)
                goto out;
        }
 
+       i = 0;
+       do {
+               ret = bq24190_read(bdi, BQ24190_REG_F, &f_reg);
+               if (ret < 0) {
+                       dev_err(bdi->dev, "Can't read F reg: %d\n", ret);
+                       goto out;
+               }
+       } while (f_reg && ++i < 2);
+
+       if (f_reg != bdi->f_reg) {
+               dev_info(bdi->dev,
+                       "Fault: boost %d, charge %d, battery %d, ntc %d\n",
+                       !!(f_reg & BQ24190_REG_F_BOOST_FAULT_MASK),
+                       !!(f_reg & BQ24190_REG_F_CHRG_FAULT_MASK),
+                       !!(f_reg & BQ24190_REG_F_BAT_FAULT_MASK),
+                       !!(f_reg & BQ24190_REG_F_NTC_FAULT_MASK));
+
+               mutex_lock(&bdi->f_reg_lock);
+               if ((bdi->f_reg & battery_mask_f) != (f_reg & battery_mask_f))
+                       alert_battery = true;
+               if ((bdi->f_reg & ~battery_mask_f) != (f_reg & ~battery_mask_f))
+                       alert_charger = true;
+               bdi->f_reg = f_reg;
+               mutex_unlock(&bdi->f_reg_lock);
+       }
+
        if (ss_reg != bdi->ss_reg) {
                /*
                 * The device is in host mode so when PG_STAT goes from 1->0
@@ -1225,47 +1217,17 @@ static irqreturn_t bq24190_irq_handler_thread(int irq, void *data)
                                        ret);
                }
 
+               if ((bdi->ss_reg & battery_mask_ss) != (ss_reg & battery_mask_ss))
+                       alert_battery = true;
+               if ((bdi->ss_reg & ~battery_mask_ss) != (ss_reg & ~battery_mask_ss))
+                       alert_charger = true;
                bdi->ss_reg = ss_reg;
-               alert_userspace = true;
-       }
-
-       mutex_lock(&bdi->f_reg_lock);
-
-       ret = bq24190_read(bdi, BQ24190_REG_F, &f_reg);
-       if (ret < 0) {
-               mutex_unlock(&bdi->f_reg_lock);
-               dev_err(bdi->dev, "Can't read F reg: %d\n", ret);
-               goto out;
-       }
-
-       if (f_reg != bdi->f_reg) {
-               bdi->f_reg = f_reg;
-               bdi->charger_health_valid = true;
-               bdi->battery_health_valid = true;
-               bdi->battery_status_valid = true;
-
-               alert_userspace = true;
        }
 
-       mutex_unlock(&bdi->f_reg_lock);
-
-       /*
-        * Sometimes bq24190 gives a steady trickle of interrupts even
-        * though the watchdog timer is turned off and neither the STATUS
-        * nor FAULT registers have changed.  Weed out these sprurious
-        * interrupts so userspace isn't alerted for no reason.
-        * In addition, the chip always generates an interrupt after
-        * register reset so we should ignore that one (the very first
-        * interrupt received).
-        */
-       if (alert_userspace) {
-               if (!bdi->first_time) {
-                       power_supply_changed(bdi->charger);
-                       power_supply_changed(bdi->battery);
-               } else {
-                       bdi->first_time = false;
-               }
-       }
+       if (alert_charger)
+               power_supply_changed(bdi->charger);
+       if (alert_battery)
+               power_supply_changed(bdi->battery);
 
 out:
        pm_runtime_put_sync(bdi->dev);
@@ -1300,6 +1262,10 @@ static int bq24190_hw_init(struct bq24190_dev_info *bdi)
                goto out;
 
        ret = bq24190_set_mode_host(bdi);
+       if (ret < 0)
+               goto out;
+
+       ret = bq24190_read(bdi, BQ24190_REG_SS, &bdi->ss_reg);
 out:
        pm_runtime_put_sync(bdi->dev);
        return ret;
@@ -1375,10 +1341,8 @@ static int bq24190_probe(struct i2c_client *client,
        bdi->model = id->driver_data;
        strncpy(bdi->model_name, id->name, I2C_NAME_SIZE);
        mutex_init(&bdi->f_reg_lock);
-       bdi->first_time = true;
-       bdi->charger_health_valid = false;
-       bdi->battery_health_valid = false;
-       bdi->battery_status_valid = false;
+       bdi->f_reg = 0;
+       bdi->ss_reg = BQ24190_REG_SS_VBUS_STAT_MASK; /* impossible state */
 
        i2c_set_clientdata(client, bdi);
 
@@ -1392,22 +1356,13 @@ static int bq24190_probe(struct i2c_client *client,
                return -EINVAL;
        }
 
-       ret = devm_request_threaded_irq(dev, bdi->irq, NULL,
-                       bq24190_irq_handler_thread,
-                       IRQF_TRIGGER_RISING | IRQF_ONESHOT,
-                       "bq24190-charger", bdi);
-       if (ret < 0) {
-               dev_err(dev, "Can't set up irq handler\n");
-               goto out1;
-       }
-
        pm_runtime_enable(dev);
        pm_runtime_resume(dev);
 
        ret = bq24190_hw_init(bdi);
        if (ret < 0) {
                dev_err(dev, "Hardware init failed\n");
-               goto out2;
+               goto out1;
        }
 
        charger_cfg.drv_data = bdi;
@@ -1418,7 +1373,7 @@ static int bq24190_probe(struct i2c_client *client,
        if (IS_ERR(bdi->charger)) {
                dev_err(dev, "Can't register charger\n");
                ret = PTR_ERR(bdi->charger);
-               goto out2;
+               goto out1;
        }
 
        battery_cfg.drv_data = bdi;
@@ -1427,27 +1382,39 @@ static int bq24190_probe(struct i2c_client *client,
        if (IS_ERR(bdi->battery)) {
                dev_err(dev, "Can't register battery\n");
                ret = PTR_ERR(bdi->battery);
-               goto out3;
+               goto out2;
        }
 
        ret = bq24190_sysfs_create_group(bdi);
        if (ret) {
                dev_err(dev, "Can't create sysfs entries\n");
+               goto out3;
+       }
+
+       ret = devm_request_threaded_irq(dev, bdi->irq, NULL,
+                       bq24190_irq_handler_thread,
+                       IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                       "bq24190-charger", bdi);
+       if (ret < 0) {
+               dev_err(dev, "Can't set up irq handler\n");
                goto out4;
        }
 
        return 0;
 
 out4:
-       power_supply_unregister(bdi->battery);
+       bq24190_sysfs_remove_group(bdi);
+
 out3:
-       power_supply_unregister(bdi->charger);
+       power_supply_unregister(bdi->battery);
+
 out2:
-       pm_runtime_disable(dev);
+       power_supply_unregister(bdi->charger);
+
 out1:
+       pm_runtime_disable(dev);
        if (bdi->gpio_int)
                gpio_free(bdi->gpio_int);
-
        return ret;
 }
 
@@ -1488,12 +1455,13 @@ static int bq24190_pm_resume(struct device *dev)
        struct i2c_client *client = to_i2c_client(dev);
        struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
 
-       bdi->charger_health_valid = false;
-       bdi->battery_health_valid = false;
-       bdi->battery_status_valid = false;
+       bdi->f_reg = 0;
+       bdi->ss_reg = BQ24190_REG_SS_VBUS_STAT_MASK; /* impossible state */
 
        pm_runtime_get_sync(bdi->dev);
        bq24190_register_reset(bdi);
+       bq24190_set_mode_host(bdi);
+       bq24190_read(bdi, BQ24190_REG_SS, &bdi->ss_reg);
        pm_runtime_put_sync(bdi->dev);
 
        /* Things may have changed while suspended so alert upper layer */
index eb7783b42e0acdd2cf8eb4e1b75bbfdbc3df9605..eb0145380def28f16282d490535a48e3d4b0f379 100644 (file)
@@ -50,6 +50,8 @@ struct bq24735 {
        struct bq24735_platform         *pdata;
        struct mutex                    lock;
        struct gpio_desc                *status_gpio;
+       struct delayed_work             poll;
+       u32                             poll_interval;
        bool                            charging;
 };
 
@@ -105,26 +107,6 @@ static int bq24735_update_word(struct i2c_client *client, u8 reg,
        return bq24735_write_word(client, reg, tmp);
 }
 
-static inline int bq24735_enable_charging(struct bq24735 *charger)
-{
-       if (charger->pdata->ext_control)
-               return 0;
-
-       return bq24735_update_word(charger->client, BQ24735_CHG_OPT,
-                                  BQ24735_CHG_OPT_CHARGE_DISABLE,
-                                  ~BQ24735_CHG_OPT_CHARGE_DISABLE);
-}
-
-static inline int bq24735_disable_charging(struct bq24735 *charger)
-{
-       if (charger->pdata->ext_control)
-               return 0;
-
-       return bq24735_update_word(charger->client, BQ24735_CHG_OPT,
-                                  BQ24735_CHG_OPT_CHARGE_DISABLE,
-                                  BQ24735_CHG_OPT_CHARGE_DISABLE);
-}
-
 static int bq24735_config_charger(struct bq24735 *charger)
 {
        struct bq24735_platform *pdata = charger->pdata;
@@ -176,6 +158,31 @@ static int bq24735_config_charger(struct bq24735 *charger)
        return 0;
 }
 
+static inline int bq24735_enable_charging(struct bq24735 *charger)
+{
+       int ret;
+
+       if (charger->pdata->ext_control)
+               return 0;
+
+       ret = bq24735_config_charger(charger);
+       if (ret)
+               return ret;
+
+       return bq24735_update_word(charger->client, BQ24735_CHG_OPT,
+                                  BQ24735_CHG_OPT_CHARGE_DISABLE, 0);
+}
+
+static inline int bq24735_disable_charging(struct bq24735 *charger)
+{
+       if (charger->pdata->ext_control)
+               return 0;
+
+       return bq24735_update_word(charger->client, BQ24735_CHG_OPT,
+                                  BQ24735_CHG_OPT_CHARGE_DISABLE,
+                                  BQ24735_CHG_OPT_CHARGE_DISABLE);
+}
+
 static bool bq24735_charger_is_present(struct bq24735 *charger)
 {
        if (charger->status_gpio) {
@@ -185,7 +192,7 @@ static bool bq24735_charger_is_present(struct bq24735 *charger)
 
                ac = bq24735_read_word(charger->client, BQ24735_CHG_OPT);
                if (ac < 0) {
-                       dev_err(&charger->client->dev,
+                       dev_dbg(&charger->client->dev,
                                "Failed to read charger options : %d\n",
                                ac);
                        return false;
@@ -210,11 +217,8 @@ static int bq24735_charger_is_charging(struct bq24735 *charger)
        return !(ret & BQ24735_CHG_OPT_CHARGE_DISABLE);
 }
 
-static irqreturn_t bq24735_charger_isr(int irq, void *devid)
+static void bq24735_update(struct bq24735 *charger)
 {
-       struct power_supply *psy = devid;
-       struct bq24735 *charger = to_bq24735(psy);
-
        mutex_lock(&charger->lock);
 
        if (charger->charging && bq24735_charger_is_present(charger))
@@ -224,11 +228,29 @@ static irqreturn_t bq24735_charger_isr(int irq, void *devid)
 
        mutex_unlock(&charger->lock);
 
-       power_supply_changed(psy);
+       power_supply_changed(charger->charger);
+}
+
+static irqreturn_t bq24735_charger_isr(int irq, void *devid)
+{
+       struct power_supply *psy = devid;
+       struct bq24735 *charger = to_bq24735(psy);
+
+       bq24735_update(charger);
 
        return IRQ_HANDLED;
 }
 
+static void bq24735_poll(struct work_struct *work)
+{
+       struct bq24735 *charger = container_of(work, struct bq24735, poll.work);
+
+       bq24735_update(charger);
+
+       schedule_delayed_work(&charger->poll,
+                             msecs_to_jiffies(charger->poll_interval));
+}
+
 static int bq24735_charger_get_property(struct power_supply *psy,
                                        enum power_supply_property psp,
                                        union power_supply_propval *val)
@@ -276,7 +298,6 @@ static int bq24735_charger_set_property(struct power_supply *psy,
                        mutex_unlock(&charger->lock);
                        if (ret)
                                return ret;
-                       bq24735_config_charger(charger);
                        break;
                case POWER_SUPPLY_STATUS_DISCHARGING:
                case POWER_SUPPLY_STATUS_NOT_CHARGING:
@@ -395,7 +416,7 @@ static int bq24735_charger_probe(struct i2c_client *client,
                return ret;
        }
 
-       if (!charger->status_gpio || bq24735_charger_is_present(charger)) {
+       if (bq24735_charger_is_present(charger)) {
                ret = bq24735_read_word(client, BQ24735_MANUFACTURER_ID);
                if (ret < 0) {
                        dev_err(&client->dev, "Failed to read manufacturer id : %d\n",
@@ -416,16 +437,7 @@ static int bq24735_charger_probe(struct i2c_client *client,
                                "device id mismatch. 0x000b != 0x%04x\n", ret);
                        return -ENODEV;
                }
-       }
-
-       ret = bq24735_config_charger(charger);
-       if (ret < 0) {
-               dev_err(&client->dev, "failed in configuring charger");
-               return ret;
-       }
 
-       /* check for AC adapter presence */
-       if (bq24735_charger_is_present(charger)) {
                ret = bq24735_enable_charging(charger);
                if (ret < 0) {
                        dev_err(&client->dev, "Failed to enable charging\n");
@@ -456,11 +468,32 @@ static int bq24735_charger_probe(struct i2c_client *client,
                                client->irq, ret);
                        return ret;
                }
+       } else {
+               ret = device_property_read_u32(&client->dev, "poll-interval",
+                                              &charger->poll_interval);
+               if (ret)
+                       return 0;
+               if (!charger->poll_interval)
+                       return 0;
+
+               INIT_DELAYED_WORK(&charger->poll, bq24735_poll);
+               schedule_delayed_work(&charger->poll,
+                                     msecs_to_jiffies(charger->poll_interval));
        }
 
        return 0;
 }
 
+static int bq24735_charger_remove(struct i2c_client *client)
+{
+       struct bq24735 *charger = i2c_get_clientdata(client);
+
+       if (charger->poll_interval)
+               cancel_delayed_work_sync(&charger->poll);
+
+       return 0;
+}
+
 static const struct i2c_device_id bq24735_charger_id[] = {
        { "bq24735-charger", 0 },
        {}
@@ -479,6 +512,7 @@ static struct i2c_driver bq24735_charger_driver = {
                .of_match_table = bq24735_match_ids,
        },
        .probe = bq24735_charger_probe,
+       .remove = bq24735_charger_remove,
        .id_table = bq24735_charger_id,
 };
 
index 08c36b8e04bd6c804c9a94a421c9b6a2fb1dcab9..398801a21b865eef7c0fb4e1e7bce8449e239f6a 100644 (file)
  * http://www.ti.com/product/bq27010
  * http://www.ti.com/product/bq27210
  * http://www.ti.com/product/bq27500
+ * http://www.ti.com/product/bq27510-g1
+ * http://www.ti.com/product/bq27510-g2
  * http://www.ti.com/product/bq27510-g3
  * http://www.ti.com/product/bq27520-g4
+ * http://www.ti.com/product/bq27520-g1
+ * http://www.ti.com/product/bq27520-g2
+ * http://www.ti.com/product/bq27520-g3
+ * http://www.ti.com/product/bq27520-g4
  * http://www.ti.com/product/bq27530-g1
  * http://www.ti.com/product/bq27531-g1
  * http://www.ti.com/product/bq27541-g1
@@ -145,7 +151,7 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = {
                [BQ27XXX_REG_DCAP] = 0x76,
                [BQ27XXX_REG_AP] = INVALID_REG_ADDR,
        },
-       [BQ27500] = {
+       [BQ2750X] = {
                [BQ27XXX_REG_CTRL] = 0x00,
                [BQ27XXX_REG_TEMP] = 0x06,
                [BQ27XXX_REG_INT_TEMP] = 0x28,
@@ -164,7 +170,83 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = {
                [BQ27XXX_REG_DCAP] = 0x3c,
                [BQ27XXX_REG_AP] = INVALID_REG_ADDR,
        },
-       [BQ27510] = {
+       [BQ2751X] = {
+               [BQ27XXX_REG_CTRL] = 0x00,
+               [BQ27XXX_REG_TEMP] = 0x06,
+               [BQ27XXX_REG_INT_TEMP] = 0x28,
+               [BQ27XXX_REG_VOLT] = 0x08,
+               [BQ27XXX_REG_AI] = 0x14,
+               [BQ27XXX_REG_FLAGS] = 0x0a,
+               [BQ27XXX_REG_TTE] = 0x16,
+               [BQ27XXX_REG_TTF] = INVALID_REG_ADDR,
+               [BQ27XXX_REG_TTES] = 0x1a,
+               [BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
+               [BQ27XXX_REG_NAC] = 0x0c,
+               [BQ27XXX_REG_FCC] = 0x12,
+               [BQ27XXX_REG_CYCT] = 0x1e,
+               [BQ27XXX_REG_AE] = INVALID_REG_ADDR,
+               [BQ27XXX_REG_SOC] = 0x20,
+               [BQ27XXX_REG_DCAP] = 0x2e,
+               [BQ27XXX_REG_AP] = INVALID_REG_ADDR,
+       },
+       [BQ27500] = {
+               [BQ27XXX_REG_CTRL] = 0x00,
+               [BQ27XXX_REG_TEMP] = 0x06,
+               [BQ27XXX_REG_INT_TEMP] = INVALID_REG_ADDR,
+               [BQ27XXX_REG_VOLT] = 0x08,
+               [BQ27XXX_REG_AI] = 0x14,
+               [BQ27XXX_REG_FLAGS] = 0x0a,
+               [BQ27XXX_REG_TTE] = 0x16,
+               [BQ27XXX_REG_TTF] = 0x18,
+               [BQ27XXX_REG_TTES] = 0x1c,
+               [BQ27XXX_REG_TTECP] = 0x26,
+               [BQ27XXX_REG_NAC] = 0x0c,
+               [BQ27XXX_REG_FCC] = 0x12,
+               [BQ27XXX_REG_CYCT] = 0x2a,
+               [BQ27XXX_REG_AE] = 0x22,
+               [BQ27XXX_REG_SOC] = 0x2c,
+               [BQ27XXX_REG_DCAP] = 0x3c,
+               [BQ27XXX_REG_AP] = 0x24,
+       },
+       [BQ27510G1] = {
+               [BQ27XXX_REG_CTRL] = 0x00,
+               [BQ27XXX_REG_TEMP] = 0x06,
+               [BQ27XXX_REG_INT_TEMP] = INVALID_REG_ADDR,
+               [BQ27XXX_REG_VOLT] = 0x08,
+               [BQ27XXX_REG_AI] = 0x14,
+               [BQ27XXX_REG_FLAGS] = 0x0a,
+               [BQ27XXX_REG_TTE] = 0x16,
+               [BQ27XXX_REG_TTF] = 0x18,
+               [BQ27XXX_REG_TTES] = 0x1c,
+               [BQ27XXX_REG_TTECP] = 0x26,
+               [BQ27XXX_REG_NAC] = 0x0c,
+               [BQ27XXX_REG_FCC] = 0x12,
+               [BQ27XXX_REG_CYCT] = 0x2a,
+               [BQ27XXX_REG_AE] = 0x22,
+               [BQ27XXX_REG_SOC] = 0x2c,
+               [BQ27XXX_REG_DCAP] = 0x3c,
+               [BQ27XXX_REG_AP] = 0x24,
+       },
+       [BQ27510G2] = {
+               [BQ27XXX_REG_CTRL] = 0x00,
+               [BQ27XXX_REG_TEMP] = 0x06,
+               [BQ27XXX_REG_INT_TEMP] = INVALID_REG_ADDR,
+               [BQ27XXX_REG_VOLT] = 0x08,
+               [BQ27XXX_REG_AI] = 0x14,
+               [BQ27XXX_REG_FLAGS] = 0x0a,
+               [BQ27XXX_REG_TTE] = 0x16,
+               [BQ27XXX_REG_TTF] = 0x18,
+               [BQ27XXX_REG_TTES] = 0x1c,
+               [BQ27XXX_REG_TTECP] = 0x26,
+               [BQ27XXX_REG_NAC] = 0x0c,
+               [BQ27XXX_REG_FCC] = 0x12,
+               [BQ27XXX_REG_CYCT] = 0x2a,
+               [BQ27XXX_REG_AE] = 0x22,
+               [BQ27XXX_REG_SOC] = 0x2c,
+               [BQ27XXX_REG_DCAP] = 0x3c,
+               [BQ27XXX_REG_AP] = 0x24,
+       },
+       [BQ27510G3] = {
                [BQ27XXX_REG_CTRL] = 0x00,
                [BQ27XXX_REG_TEMP] = 0x06,
                [BQ27XXX_REG_INT_TEMP] = 0x28,
@@ -183,6 +265,82 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = {
                [BQ27XXX_REG_DCAP] = 0x2e,
                [BQ27XXX_REG_AP] = INVALID_REG_ADDR,
        },
+       [BQ27520G1] = {
+               [BQ27XXX_REG_CTRL] = 0x00,
+               [BQ27XXX_REG_TEMP] = 0x06,
+               [BQ27XXX_REG_INT_TEMP] = INVALID_REG_ADDR,
+               [BQ27XXX_REG_VOLT] = 0x08,
+               [BQ27XXX_REG_AI] = 0x14,
+               [BQ27XXX_REG_FLAGS] = 0x0a,
+               [BQ27XXX_REG_TTE] = 0x16,
+               [BQ27XXX_REG_TTF] = 0x18,
+               [BQ27XXX_REG_TTES] = 0x1c,
+               [BQ27XXX_REG_TTECP] = 0x26,
+               [BQ27XXX_REG_NAC] = 0x0c,
+               [BQ27XXX_REG_FCC] = 0x12,
+               [BQ27XXX_REG_CYCT] = INVALID_REG_ADDR,
+               [BQ27XXX_REG_AE] = 0x22,
+               [BQ27XXX_REG_SOC] = 0x2c,
+               [BQ27XXX_REG_DCAP] = 0x3c,
+               [BQ27XXX_REG_AP] = 0x24,
+       },
+       [BQ27520G2] = {
+               [BQ27XXX_REG_CTRL] = 0x00,
+               [BQ27XXX_REG_TEMP] = 0x06,
+               [BQ27XXX_REG_INT_TEMP] = 0x36,
+               [BQ27XXX_REG_VOLT] = 0x08,
+               [BQ27XXX_REG_AI] = 0x14,
+               [BQ27XXX_REG_FLAGS] = 0x0a,
+               [BQ27XXX_REG_TTE] = 0x16,
+               [BQ27XXX_REG_TTF] = 0x18,
+               [BQ27XXX_REG_TTES] = 0x1c,
+               [BQ27XXX_REG_TTECP] = 0x26,
+               [BQ27XXX_REG_NAC] = 0x0c,
+               [BQ27XXX_REG_FCC] = 0x12,
+               [BQ27XXX_REG_CYCT] = 0x2a,
+               [BQ27XXX_REG_AE] = 0x22,
+               [BQ27XXX_REG_SOC] = 0x2c,
+               [BQ27XXX_REG_DCAP] = 0x3c,
+               [BQ27XXX_REG_AP] = 0x24,
+       },
+       [BQ27520G3] = {
+               [BQ27XXX_REG_CTRL] = 0x00,
+               [BQ27XXX_REG_TEMP] = 0x06,
+               [BQ27XXX_REG_INT_TEMP] = 0x36,
+               [BQ27XXX_REG_VOLT] = 0x08,
+               [BQ27XXX_REG_AI] = 0x14,
+               [BQ27XXX_REG_FLAGS] = 0x0a,
+               [BQ27XXX_REG_TTE] = 0x16,
+               [BQ27XXX_REG_TTF] = INVALID_REG_ADDR,
+               [BQ27XXX_REG_TTES] = 0x1c,
+               [BQ27XXX_REG_TTECP] = 0x26,
+               [BQ27XXX_REG_NAC] = 0x0c,
+               [BQ27XXX_REG_FCC] = 0x12,
+               [BQ27XXX_REG_CYCT] = 0x2a,
+               [BQ27XXX_REG_AE] = 0x22,
+               [BQ27XXX_REG_SOC] = 0x2c,
+               [BQ27XXX_REG_DCAP] = 0x3c,
+               [BQ27XXX_REG_AP] = 0x24,
+       },
+       [BQ27520G4] = {
+               [BQ27XXX_REG_CTRL] = 0x00,
+               [BQ27XXX_REG_TEMP] = 0x06,
+               [BQ27XXX_REG_INT_TEMP] = 0x28,
+               [BQ27XXX_REG_VOLT] = 0x08,
+               [BQ27XXX_REG_AI] = 0x14,
+               [BQ27XXX_REG_FLAGS] = 0x0a,
+               [BQ27XXX_REG_TTE] = 0x16,
+               [BQ27XXX_REG_TTF] = INVALID_REG_ADDR,
+               [BQ27XXX_REG_TTES] = 0x1c,
+               [BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
+               [BQ27XXX_REG_NAC] = 0x0c,
+               [BQ27XXX_REG_FCC] = 0x12,
+               [BQ27XXX_REG_CYCT] = 0x1e,
+               [BQ27XXX_REG_AE] = INVALID_REG_ADDR,
+               [BQ27XXX_REG_SOC] = 0x20,
+               [BQ27XXX_REG_DCAP] = INVALID_REG_ADDR,
+               [BQ27XXX_REG_AP] = INVALID_REG_ADDR,
+       },
        [BQ27530] = {
                [BQ27XXX_REG_CTRL] = 0x00,
                [BQ27XXX_REG_TEMP] = 0x06,
@@ -303,7 +461,106 @@ static enum power_supply_property bq27010_battery_props[] = {
        POWER_SUPPLY_PROP_MANUFACTURER,
 };
 
+static enum power_supply_property bq2750x_battery_props[] = {
+       POWER_SUPPLY_PROP_STATUS,
+       POWER_SUPPLY_PROP_PRESENT,
+       POWER_SUPPLY_PROP_VOLTAGE_NOW,
+       POWER_SUPPLY_PROP_CURRENT_NOW,
+       POWER_SUPPLY_PROP_CAPACITY,
+       POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+       POWER_SUPPLY_PROP_TEMP,
+       POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+       POWER_SUPPLY_PROP_TECHNOLOGY,
+       POWER_SUPPLY_PROP_CHARGE_FULL,
+       POWER_SUPPLY_PROP_CHARGE_NOW,
+       POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+       POWER_SUPPLY_PROP_CYCLE_COUNT,
+       POWER_SUPPLY_PROP_HEALTH,
+       POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static enum power_supply_property bq2751x_battery_props[] = {
+       POWER_SUPPLY_PROP_STATUS,
+       POWER_SUPPLY_PROP_PRESENT,
+       POWER_SUPPLY_PROP_VOLTAGE_NOW,
+       POWER_SUPPLY_PROP_CURRENT_NOW,
+       POWER_SUPPLY_PROP_CAPACITY,
+       POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+       POWER_SUPPLY_PROP_TEMP,
+       POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+       POWER_SUPPLY_PROP_TECHNOLOGY,
+       POWER_SUPPLY_PROP_CHARGE_FULL,
+       POWER_SUPPLY_PROP_CHARGE_NOW,
+       POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+       POWER_SUPPLY_PROP_CYCLE_COUNT,
+       POWER_SUPPLY_PROP_HEALTH,
+       POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
 static enum power_supply_property bq27500_battery_props[] = {
+       POWER_SUPPLY_PROP_STATUS,
+       POWER_SUPPLY_PROP_PRESENT,
+       POWER_SUPPLY_PROP_VOLTAGE_NOW,
+       POWER_SUPPLY_PROP_CURRENT_NOW,
+       POWER_SUPPLY_PROP_CAPACITY,
+       POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+       POWER_SUPPLY_PROP_TEMP,
+       POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+       POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
+       POWER_SUPPLY_PROP_TECHNOLOGY,
+       POWER_SUPPLY_PROP_CHARGE_FULL,
+       POWER_SUPPLY_PROP_CHARGE_NOW,
+       POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+       POWER_SUPPLY_PROP_CYCLE_COUNT,
+       POWER_SUPPLY_PROP_ENERGY_NOW,
+       POWER_SUPPLY_PROP_POWER_AVG,
+       POWER_SUPPLY_PROP_HEALTH,
+       POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static enum power_supply_property bq27510g1_battery_props[] = {
+       POWER_SUPPLY_PROP_STATUS,
+       POWER_SUPPLY_PROP_PRESENT,
+       POWER_SUPPLY_PROP_VOLTAGE_NOW,
+       POWER_SUPPLY_PROP_CURRENT_NOW,
+       POWER_SUPPLY_PROP_CAPACITY,
+       POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+       POWER_SUPPLY_PROP_TEMP,
+       POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+       POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
+       POWER_SUPPLY_PROP_TECHNOLOGY,
+       POWER_SUPPLY_PROP_CHARGE_FULL,
+       POWER_SUPPLY_PROP_CHARGE_NOW,
+       POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+       POWER_SUPPLY_PROP_CYCLE_COUNT,
+       POWER_SUPPLY_PROP_ENERGY_NOW,
+       POWER_SUPPLY_PROP_POWER_AVG,
+       POWER_SUPPLY_PROP_HEALTH,
+       POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static enum power_supply_property bq27510g2_battery_props[] = {
+       POWER_SUPPLY_PROP_STATUS,
+       POWER_SUPPLY_PROP_PRESENT,
+       POWER_SUPPLY_PROP_VOLTAGE_NOW,
+       POWER_SUPPLY_PROP_CURRENT_NOW,
+       POWER_SUPPLY_PROP_CAPACITY,
+       POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+       POWER_SUPPLY_PROP_TEMP,
+       POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+       POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
+       POWER_SUPPLY_PROP_TECHNOLOGY,
+       POWER_SUPPLY_PROP_CHARGE_FULL,
+       POWER_SUPPLY_PROP_CHARGE_NOW,
+       POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+       POWER_SUPPLY_PROP_CYCLE_COUNT,
+       POWER_SUPPLY_PROP_ENERGY_NOW,
+       POWER_SUPPLY_PROP_POWER_AVG,
+       POWER_SUPPLY_PROP_HEALTH,
+       POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static enum power_supply_property bq27510g3_battery_props[] = {
        POWER_SUPPLY_PROP_STATUS,
        POWER_SUPPLY_PROP_PRESENT,
        POWER_SUPPLY_PROP_VOLTAGE_NOW,
@@ -321,7 +578,27 @@ static enum power_supply_property bq27500_battery_props[] = {
        POWER_SUPPLY_PROP_MANUFACTURER,
 };
 
-static enum power_supply_property bq27510_battery_props[] = {
+static enum power_supply_property bq27520g1_battery_props[] = {
+       POWER_SUPPLY_PROP_STATUS,
+       POWER_SUPPLY_PROP_PRESENT,
+       POWER_SUPPLY_PROP_VOLTAGE_NOW,
+       POWER_SUPPLY_PROP_CURRENT_NOW,
+       POWER_SUPPLY_PROP_CAPACITY,
+       POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+       POWER_SUPPLY_PROP_TEMP,
+       POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+       POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
+       POWER_SUPPLY_PROP_TECHNOLOGY,
+       POWER_SUPPLY_PROP_CHARGE_FULL,
+       POWER_SUPPLY_PROP_CHARGE_NOW,
+       POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+       POWER_SUPPLY_PROP_ENERGY_NOW,
+       POWER_SUPPLY_PROP_POWER_AVG,
+       POWER_SUPPLY_PROP_HEALTH,
+       POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static enum power_supply_property bq27520g2_battery_props[] = {
        POWER_SUPPLY_PROP_STATUS,
        POWER_SUPPLY_PROP_PRESENT,
        POWER_SUPPLY_PROP_VOLTAGE_NOW,
@@ -330,11 +607,51 @@ static enum power_supply_property bq27510_battery_props[] = {
        POWER_SUPPLY_PROP_CAPACITY_LEVEL,
        POWER_SUPPLY_PROP_TEMP,
        POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+       POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
        POWER_SUPPLY_PROP_TECHNOLOGY,
        POWER_SUPPLY_PROP_CHARGE_FULL,
        POWER_SUPPLY_PROP_CHARGE_NOW,
        POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
        POWER_SUPPLY_PROP_CYCLE_COUNT,
+       POWER_SUPPLY_PROP_ENERGY_NOW,
+       POWER_SUPPLY_PROP_POWER_AVG,
+       POWER_SUPPLY_PROP_HEALTH,
+       POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static enum power_supply_property bq27520g3_battery_props[] = {
+       POWER_SUPPLY_PROP_STATUS,
+       POWER_SUPPLY_PROP_PRESENT,
+       POWER_SUPPLY_PROP_VOLTAGE_NOW,
+       POWER_SUPPLY_PROP_CURRENT_NOW,
+       POWER_SUPPLY_PROP_CAPACITY,
+       POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+       POWER_SUPPLY_PROP_TEMP,
+       POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+       POWER_SUPPLY_PROP_TECHNOLOGY,
+       POWER_SUPPLY_PROP_CHARGE_FULL,
+       POWER_SUPPLY_PROP_CHARGE_NOW,
+       POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+       POWER_SUPPLY_PROP_CYCLE_COUNT,
+       POWER_SUPPLY_PROP_ENERGY_NOW,
+       POWER_SUPPLY_PROP_POWER_AVG,
+       POWER_SUPPLY_PROP_HEALTH,
+       POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static enum power_supply_property bq27520g4_battery_props[] = {
+       POWER_SUPPLY_PROP_STATUS,
+       POWER_SUPPLY_PROP_PRESENT,
+       POWER_SUPPLY_PROP_VOLTAGE_NOW,
+       POWER_SUPPLY_PROP_CURRENT_NOW,
+       POWER_SUPPLY_PROP_CAPACITY,
+       POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+       POWER_SUPPLY_PROP_TEMP,
+       POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+       POWER_SUPPLY_PROP_TECHNOLOGY,
+       POWER_SUPPLY_PROP_CHARGE_FULL,
+       POWER_SUPPLY_PROP_CHARGE_NOW,
+       POWER_SUPPLY_PROP_CYCLE_COUNT,
        POWER_SUPPLY_PROP_HEALTH,
        POWER_SUPPLY_PROP_MANUFACTURER,
 };
@@ -421,8 +738,16 @@ static struct {
 } bq27xxx_battery_props[] = {
        BQ27XXX_PROP(BQ27000, bq27000_battery_props),
        BQ27XXX_PROP(BQ27010, bq27010_battery_props),
+       BQ27XXX_PROP(BQ2750X, bq2750x_battery_props),
+       BQ27XXX_PROP(BQ2751X, bq2751x_battery_props),
        BQ27XXX_PROP(BQ27500, bq27500_battery_props),
-       BQ27XXX_PROP(BQ27510, bq27510_battery_props),
+       BQ27XXX_PROP(BQ27510G1, bq27510g1_battery_props),
+       BQ27XXX_PROP(BQ27510G2, bq27510g2_battery_props),
+       BQ27XXX_PROP(BQ27510G3, bq27510g3_battery_props),
+       BQ27XXX_PROP(BQ27520G1, bq27520g1_battery_props),
+       BQ27XXX_PROP(BQ27520G2, bq27520g2_battery_props),
+       BQ27XXX_PROP(BQ27520G3, bq27520g3_battery_props),
+       BQ27XXX_PROP(BQ27520G4, bq27520g4_battery_props),
        BQ27XXX_PROP(BQ27530, bq27530_battery_props),
        BQ27XXX_PROP(BQ27541, bq27541_battery_props),
        BQ27XXX_PROP(BQ27545, bq27545_battery_props),
@@ -674,13 +999,26 @@ static int bq27xxx_battery_read_pwr_avg(struct bq27xxx_device_info *di)
  */
 static bool bq27xxx_battery_overtemp(struct bq27xxx_device_info *di, u16 flags)
 {
-       if (di->chip == BQ27500 || di->chip == BQ27510 ||
-           di->chip == BQ27541 || di->chip == BQ27545)
+       switch (di->chip) {
+       case BQ2750X:
+       case BQ2751X:
+       case BQ27500:
+       case BQ27510G1:
+       case BQ27510G2:
+       case BQ27510G3:
+       case BQ27520G1:
+       case BQ27520G2:
+       case BQ27520G3:
+       case BQ27520G4:
+       case BQ27541:
+       case BQ27545:
                return flags & (BQ27XXX_FLAG_OTC | BQ27XXX_FLAG_OTD);
-       if (di->chip == BQ27530 || di->chip == BQ27421)
+       case BQ27530:
+       case BQ27421:
                return flags & BQ27XXX_FLAG_OT;
-
-       return false;
+       default:
+               return false;
+       }
 }
 
 /*
index 5c5c3a6f99234dfd82cc41f4cd3ec5cd50865c7d..c68fbc3fe50a9252c1fe93a8d346190bb1a361e1 100644 (file)
@@ -148,9 +148,17 @@ static int bq27xxx_battery_i2c_remove(struct i2c_client *client)
 static const struct i2c_device_id bq27xxx_i2c_id_table[] = {
        { "bq27200", BQ27000 },
        { "bq27210", BQ27010 },
-       { "bq27500", BQ27500 },
-       { "bq27510", BQ27510 },
-       { "bq27520", BQ27510 },
+       { "bq27500", BQ2750X },
+       { "bq27510", BQ2751X },
+       { "bq27520", BQ2751X },
+       { "bq27500-1", BQ27500 },
+       { "bq27510g1", BQ27510G1 },
+       { "bq27510g2", BQ27510G2 },
+       { "bq27510g3", BQ27510G3 },
+       { "bq27520g1", BQ27520G1 },
+       { "bq27520g2", BQ27520G2 },
+       { "bq27520g3", BQ27520G3 },
+       { "bq27520g4", BQ27520G4 },
        { "bq27530", BQ27530 },
        { "bq27531", BQ27530 },
        { "bq27541", BQ27541 },
@@ -173,6 +181,14 @@ static const struct of_device_id bq27xxx_battery_i2c_of_match_table[] = {
        { .compatible = "ti,bq27500" },
        { .compatible = "ti,bq27510" },
        { .compatible = "ti,bq27520" },
+       { .compatible = "ti,bq27500-1" },
+       { .compatible = "ti,bq27510g1" },
+       { .compatible = "ti,bq27510g2" },
+       { .compatible = "ti,bq27510g3" },
+       { .compatible = "ti,bq27520g1" },
+       { .compatible = "ti,bq27520g2" },
+       { .compatible = "ti,bq27520g3" },
+       { .compatible = "ti,bq27520g4" },
        { .compatible = "ti,bq27530" },
        { .compatible = "ti,bq27531" },
        { .compatible = "ti,bq27541" },
index c5869b1941acddbe75a1221527238d6641d6daf3..001731e88718e38cd31a0314a6e3409dad722684 100644 (file)
@@ -14,7 +14,7 @@
  */
 
 #include <linux/device.h>
-#include <linux/gpio.h>
+#include <linux/gpio.h> /* For legacy platform data */
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
@@ -23,7 +23,7 @@
 #include <linux/power_supply.h>
 #include <linux/slab.h>
 #include <linux/of.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
 
 #include <linux/power/gpio-charger.h>
 
@@ -34,6 +34,8 @@ struct gpio_charger {
 
        struct power_supply *charger;
        struct power_supply_desc charger_desc;
+       struct gpio_desc *gpiod;
+       bool legacy_gpio_requested;
 };
 
 static irqreturn_t gpio_charger_irq(int irq, void *devid)
@@ -58,7 +60,8 @@ static int gpio_charger_get_property(struct power_supply *psy,
 
        switch (psp) {
        case POWER_SUPPLY_PROP_ONLINE:
-               val->intval = !!gpio_get_value_cansleep(pdata->gpio);
+               val->intval = gpiod_get_value_cansleep(gpio_charger->gpiod);
+               /* This xor is only ever used with legacy pdata GPIO */
                val->intval ^= pdata->gpio_active_low;
                break;
        default:
@@ -78,7 +81,6 @@ struct gpio_charger_platform_data *gpio_charger_parse_dt(struct device *dev)
        struct device_node *np = dev->of_node;
        struct gpio_charger_platform_data *pdata;
        const char *chargetype;
-       enum of_gpio_flags flags;
        int ret;
 
        if (!np)
@@ -89,16 +91,6 @@ struct gpio_charger_platform_data *gpio_charger_parse_dt(struct device *dev)
                return ERR_PTR(-ENOMEM);
 
        pdata->name = np->name;
-
-       pdata->gpio = of_get_gpio_flags(np, 0, &flags);
-       if (pdata->gpio < 0) {
-               if (pdata->gpio != -EPROBE_DEFER)
-                       dev_err(dev, "could not get charger gpio\n");
-               return ERR_PTR(pdata->gpio);
-       }
-
-       pdata->gpio_active_low = !!(flags & OF_GPIO_ACTIVE_LOW);
-
        pdata->type = POWER_SUPPLY_TYPE_UNKNOWN;
        ret = of_property_read_string(np, "charger-type", &chargetype);
        if (ret >= 0) {
@@ -144,11 +136,6 @@ static int gpio_charger_probe(struct platform_device *pdev)
                }
        }
 
-       if (!gpio_is_valid(pdata->gpio)) {
-               dev_err(&pdev->dev, "Invalid gpio pin\n");
-               return -EINVAL;
-       }
-
        gpio_charger = devm_kzalloc(&pdev->dev, sizeof(*gpio_charger),
                                        GFP_KERNEL);
        if (!gpio_charger) {
@@ -156,6 +143,45 @@ static int gpio_charger_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
 
+       /*
+        * This will fetch a GPIO descriptor from device tree, ACPI or
+        * boardfile descriptor tables. It's good to try this first.
+        */
+       gpio_charger->gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_IN);
+
+       /*
+        * If this fails and we're not using device tree, try the
+        * legacy platform data method.
+        */
+       if (IS_ERR(gpio_charger->gpiod) && !pdev->dev.of_node) {
+               /* Non-DT: use legacy GPIO numbers */
+               if (!gpio_is_valid(pdata->gpio)) {
+                       dev_err(&pdev->dev, "Invalid gpio pin in pdata\n");
+                       return -EINVAL;
+               }
+               ret = gpio_request(pdata->gpio, dev_name(&pdev->dev));
+               if (ret) {
+                       dev_err(&pdev->dev, "Failed to request gpio pin: %d\n",
+                               ret);
+                       return ret;
+               }
+               gpio_charger->legacy_gpio_requested = true;
+               ret = gpio_direction_input(pdata->gpio);
+               if (ret) {
+                       dev_err(&pdev->dev, "Failed to set gpio to input: %d\n",
+                               ret);
+                       goto err_gpio_free;
+               }
+               /* Then convert this to gpiod for now */
+               gpio_charger->gpiod = gpio_to_desc(pdata->gpio);
+       } else if (IS_ERR(gpio_charger->gpiod)) {
+               /* Just try again if this happens */
+               if (PTR_ERR(gpio_charger->gpiod) == -EPROBE_DEFER)
+                       return -EPROBE_DEFER;
+               dev_err(&pdev->dev, "error getting GPIO descriptor\n");
+               return PTR_ERR(gpio_charger->gpiod);
+       }
+
        charger_desc = &gpio_charger->charger_desc;
 
        charger_desc->name = pdata->name ? pdata->name : "gpio-charger";
@@ -169,17 +195,6 @@ static int gpio_charger_probe(struct platform_device *pdev)
        psy_cfg.of_node = pdev->dev.of_node;
        psy_cfg.drv_data = gpio_charger;
 
-       ret = gpio_request(pdata->gpio, dev_name(&pdev->dev));
-       if (ret) {
-               dev_err(&pdev->dev, "Failed to request gpio pin: %d\n", ret);
-               goto err_free;
-       }
-       ret = gpio_direction_input(pdata->gpio);
-       if (ret) {
-               dev_err(&pdev->dev, "Failed to set gpio to input: %d\n", ret);
-               goto err_gpio_free;
-       }
-
        gpio_charger->pdata = pdata;
 
        gpio_charger->charger = power_supply_register(&pdev->dev,
@@ -191,7 +206,7 @@ static int gpio_charger_probe(struct platform_device *pdev)
                goto err_gpio_free;
        }
 
-       irq = gpio_to_irq(pdata->gpio);
+       irq = gpiod_to_irq(gpio_charger->gpiod);
        if (irq > 0) {
                ret = request_any_context_irq(irq, gpio_charger_irq,
                                IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
@@ -209,8 +224,8 @@ static int gpio_charger_probe(struct platform_device *pdev)
        return 0;
 
 err_gpio_free:
-       gpio_free(pdata->gpio);
-err_free:
+       if (gpio_charger->legacy_gpio_requested)
+               gpio_free(pdata->gpio);
        return ret;
 }
 
@@ -223,7 +238,8 @@ static int gpio_charger_remove(struct platform_device *pdev)
 
        power_supply_unregister(gpio_charger->charger);
 
-       gpio_free(gpio_charger->pdata->gpio);
+       if (gpio_charger->legacy_gpio_requested)
+               gpio_free(gpio_charger->pdata->gpio);
 
        return 0;
 }
diff --git a/drivers/power/supply/intel_mid_battery.c b/drivers/power/supply/intel_mid_battery.c
deleted file mode 100644 (file)
index dc7feef..0000000
+++ /dev/null
@@ -1,795 +0,0 @@
-/*
- * intel_mid_battery.c - Intel MID PMIC Battery Driver
- *
- * Copyright (C) 2009 Intel Corporation
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.        See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * Author: Nithish Mahalingam <nithish.mahalingam@intel.com>
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/interrupt.h>
-#include <linux/workqueue.h>
-#include <linux/jiffies.h>
-#include <linux/param.h>
-#include <linux/device.h>
-#include <linux/spi/spi.h>
-#include <linux/platform_device.h>
-#include <linux/power_supply.h>
-
-#include <asm/intel_scu_ipc.h>
-
-#define DRIVER_NAME "pmic_battery"
-
-/*********************************************************************
- *             Generic defines
- *********************************************************************/
-
-static int debug;
-module_param(debug, int, 0444);
-MODULE_PARM_DESC(debug, "Flag to enable PMIC Battery debug messages.");
-
-#define PMIC_BATT_DRV_INFO_UPDATED     1
-#define PMIC_BATT_PRESENT              1
-#define PMIC_BATT_NOT_PRESENT          0
-#define PMIC_USB_PRESENT               PMIC_BATT_PRESENT
-#define PMIC_USB_NOT_PRESENT           PMIC_BATT_NOT_PRESENT
-
-/* pmic battery register related */
-#define PMIC_BATT_CHR_SCHRGINT_ADDR    0xD2
-#define PMIC_BATT_CHR_SBATOVP_MASK     (1 << 1)
-#define PMIC_BATT_CHR_STEMP_MASK       (1 << 2)
-#define PMIC_BATT_CHR_SCOMP_MASK       (1 << 3)
-#define PMIC_BATT_CHR_SUSBDET_MASK     (1 << 4)
-#define PMIC_BATT_CHR_SBATDET_MASK     (1 << 5)
-#define PMIC_BATT_CHR_SDCLMT_MASK      (1 << 6)
-#define PMIC_BATT_CHR_SUSBOVP_MASK     (1 << 7)
-#define PMIC_BATT_CHR_EXCPT_MASK       0x86
-
-#define PMIC_BATT_ADC_ACCCHRG_MASK     (1 << 31)
-#define PMIC_BATT_ADC_ACCCHRGVAL_MASK  0x7FFFFFFF
-
-/* pmic ipc related */
-#define PMIC_BATT_CHR_IPC_FCHRG_SUBID  0x4
-#define PMIC_BATT_CHR_IPC_TCHRG_SUBID  0x6
-
-/* types of battery charging */
-enum batt_charge_type {
-       BATT_USBOTG_500MA_CHARGE,
-       BATT_USBOTG_TRICKLE_CHARGE,
-};
-
-/* valid battery events */
-enum batt_event {
-       BATT_EVENT_BATOVP_EXCPT,
-       BATT_EVENT_USBOVP_EXCPT,
-       BATT_EVENT_TEMP_EXCPT,
-       BATT_EVENT_DCLMT_EXCPT,
-       BATT_EVENT_EXCPT
-};
-
-
-/*********************************************************************
- *             Battery properties
- *********************************************************************/
-
-/*
- * pmic battery info
- */
-struct pmic_power_module_info {
-       bool is_dev_info_updated;
-       struct device *dev;
-       /* pmic battery data */
-       unsigned long update_time;              /* jiffies when data read */
-       unsigned int usb_is_present;
-       unsigned int batt_is_present;
-       unsigned int batt_health;
-       unsigned int usb_health;
-       unsigned int batt_status;
-       unsigned int batt_charge_now;           /* in mAS */
-       unsigned int batt_prev_charge_full;     /* in mAS */
-       unsigned int batt_charge_rate;          /* in units per second */
-
-       struct power_supply *usb;
-       struct power_supply *batt;
-       int irq;                                /* GPE_ID or IRQ# */
-       struct workqueue_struct *monitor_wqueue;
-       struct delayed_work monitor_battery;
-       struct work_struct handler;
-};
-
-static unsigned int delay_time = 2000; /* in ms */
-
-/*
- * pmic ac properties
- */
-static enum power_supply_property pmic_usb_props[] = {
-       POWER_SUPPLY_PROP_PRESENT,
-       POWER_SUPPLY_PROP_HEALTH,
-};
-
-/*
- * pmic battery properties
- */
-static enum power_supply_property pmic_battery_props[] = {
-       POWER_SUPPLY_PROP_STATUS,
-       POWER_SUPPLY_PROP_HEALTH,
-       POWER_SUPPLY_PROP_PRESENT,
-       POWER_SUPPLY_PROP_CHARGE_NOW,
-       POWER_SUPPLY_PROP_CHARGE_FULL,
-};
-
-
-/*
- * Glue functions for talking to the IPC
- */
-
-struct battery_property {
-       u32 capacity;   /* Charger capacity */
-       u8  crnt;       /* Quick charge current value*/
-       u8  volt;       /* Fine adjustment of constant charge voltage */
-       u8  prot;       /* CHRGPROT register value */
-       u8  prot2;      /* CHRGPROT1 register value */
-       u8  timer;      /* Charging timer */
-};
-
-#define IPCMSG_BATTERY         0xEF
-
-/* Battery coulomb counter accumulator commands */
-#define IPC_CMD_CC_WR            0 /* Update coulomb counter value */
-#define IPC_CMD_CC_RD            1 /* Read coulomb counter value */
-#define IPC_CMD_BATTERY_PROPERTY  2 /* Read Battery property */
-
-/**
- *     pmic_scu_ipc_battery_cc_read    -       read battery cc
- *     @value: battery coulomb counter read
- *
- *     Reads the battery couloumb counter value, returns 0 on success, or
- *     an error code
- *
- *     This function may sleep. Locking for SCU accesses is handled for
- *     the caller.
- */
-static int pmic_scu_ipc_battery_cc_read(u32 *value)
-{
-       return intel_scu_ipc_command(IPCMSG_BATTERY, IPC_CMD_CC_RD,
-                                       NULL, 0, value, 1);
-}
-
-/**
- *     pmic_scu_ipc_battery_property_get       -       fetch properties
- *     @prop: battery properties
- *
- *     Retrieve the battery properties from the power management
- *
- *     This function may sleep. Locking for SCU accesses is handled for
- *     the caller.
- */
-static int pmic_scu_ipc_battery_property_get(struct battery_property *prop)
-{
-       u32 data[3];
-       u8 *p = (u8 *)&data[1];
-       int err = intel_scu_ipc_command(IPCMSG_BATTERY,
-                               IPC_CMD_BATTERY_PROPERTY, NULL, 0, data, 3);
-
-       prop->capacity = data[0];
-       prop->crnt = *p++;
-       prop->volt = *p++;
-       prop->prot = *p++;
-       prop->prot2 = *p++;
-       prop->timer = *p++;
-
-       return err;
-}
-
-/**
- *     pmic_scu_ipc_set_charger        -       set charger
- *     @charger: charger to select
- *
- *     Switch the charging mode for the SCU
- */
-
-static int pmic_scu_ipc_set_charger(int charger)
-{
-       return intel_scu_ipc_simple_command(IPCMSG_BATTERY, charger);
-}
-
-/**
- * pmic_battery_log_event - log battery events
- * @event: battery event to be logged
- * Context: can sleep
- *
- * There are multiple battery events which may be of interest to users;
- * this battery function logs the different battery events onto the
- * kernel log messages.
- */
-static void pmic_battery_log_event(enum batt_event event)
-{
-       printk(KERN_WARNING "pmic-battery: ");
-       switch (event) {
-       case BATT_EVENT_BATOVP_EXCPT:
-               printk(KERN_CONT "battery overvoltage condition\n");
-               break;
-       case BATT_EVENT_USBOVP_EXCPT:
-               printk(KERN_CONT "usb charger overvoltage condition\n");
-               break;
-       case BATT_EVENT_TEMP_EXCPT:
-               printk(KERN_CONT "high battery temperature condition\n");
-               break;
-       case BATT_EVENT_DCLMT_EXCPT:
-               printk(KERN_CONT "over battery charge current condition\n");
-               break;
-       default:
-               printk(KERN_CONT "charger/battery exception %d\n", event);
-               break;
-       }
-}
-
-/**
- * pmic_battery_read_status - read battery status information
- * @pbi: device info structure to update the read information
- * Context: can sleep
- *
- * PMIC power source information need to be updated based on the data read
- * from the PMIC battery registers.
- *
- */
-static void pmic_battery_read_status(struct pmic_power_module_info *pbi)
-{
-       unsigned int update_time_intrvl;
-       unsigned int chrg_val;
-       u32 ccval;
-       u8 r8;
-       struct battery_property batt_prop;
-       int batt_present = 0;
-       int usb_present = 0;
-       int batt_exception = 0;
-
-       /* make sure the last batt_status read happened delay_time before */
-       if (pbi->update_time && time_before(jiffies, pbi->update_time +
-                                               msecs_to_jiffies(delay_time)))
-               return;
-
-       update_time_intrvl = jiffies_to_msecs(jiffies - pbi->update_time);
-       pbi->update_time = jiffies;
-
-       /* read coulomb counter registers and schrgint register */
-       if (pmic_scu_ipc_battery_cc_read(&ccval)) {
-               dev_warn(pbi->dev, "%s(): ipc config cmd failed\n",
-                                                               __func__);
-               return;
-       }
-
-       if (intel_scu_ipc_ioread8(PMIC_BATT_CHR_SCHRGINT_ADDR, &r8)) {
-               dev_warn(pbi->dev, "%s(): ipc pmic read failed\n",
-                                                               __func__);
-               return;
-       }
-
-       /*
-        * set pmic_power_module_info members based on pmic register values
-        * read.
-        */
-
-       /* set batt_is_present */
-       if (r8 & PMIC_BATT_CHR_SBATDET_MASK) {
-               pbi->batt_is_present = PMIC_BATT_PRESENT;
-               batt_present = 1;
-       } else {
-               pbi->batt_is_present = PMIC_BATT_NOT_PRESENT;
-               pbi->batt_health = POWER_SUPPLY_HEALTH_UNKNOWN;
-               pbi->batt_status = POWER_SUPPLY_STATUS_UNKNOWN;
-       }
-
-       /* set batt_health */
-       if (batt_present) {
-               if (r8 & PMIC_BATT_CHR_SBATOVP_MASK) {
-                       pbi->batt_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
-                       pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
-                       pmic_battery_log_event(BATT_EVENT_BATOVP_EXCPT);
-                       batt_exception = 1;
-               } else if (r8 & PMIC_BATT_CHR_STEMP_MASK) {
-                       pbi->batt_health = POWER_SUPPLY_HEALTH_OVERHEAT;
-                       pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
-                       pmic_battery_log_event(BATT_EVENT_TEMP_EXCPT);
-                       batt_exception = 1;
-               } else {
-                       pbi->batt_health = POWER_SUPPLY_HEALTH_GOOD;
-                       if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
-                               /* PMIC will change charging current automatically */
-                               pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
-                       }
-               }
-       }
-
-       /* set usb_is_present */
-       if (r8 & PMIC_BATT_CHR_SUSBDET_MASK) {
-               pbi->usb_is_present = PMIC_USB_PRESENT;
-               usb_present = 1;
-       } else {
-               pbi->usb_is_present = PMIC_USB_NOT_PRESENT;
-               pbi->usb_health = POWER_SUPPLY_HEALTH_UNKNOWN;
-       }
-
-       if (usb_present) {
-               if (r8 & PMIC_BATT_CHR_SUSBOVP_MASK) {
-                       pbi->usb_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
-                       pmic_battery_log_event(BATT_EVENT_USBOVP_EXCPT);
-               } else {
-                       pbi->usb_health = POWER_SUPPLY_HEALTH_GOOD;
-               }
-       }
-
-       chrg_val = ccval & PMIC_BATT_ADC_ACCCHRGVAL_MASK;
-
-       /* set batt_prev_charge_full to battery capacity the first time */
-       if (!pbi->is_dev_info_updated) {
-               if (pmic_scu_ipc_battery_property_get(&batt_prop)) {
-                       dev_warn(pbi->dev, "%s(): ipc config cmd failed\n",
-                                                               __func__);
-                       return;
-               }
-               pbi->batt_prev_charge_full = batt_prop.capacity;
-       }
-
-       /* set batt_status */
-       if (batt_present && !batt_exception) {
-               if (r8 & PMIC_BATT_CHR_SCOMP_MASK) {
-                       pbi->batt_status = POWER_SUPPLY_STATUS_FULL;
-                       pbi->batt_prev_charge_full = chrg_val;
-               } else if (ccval & PMIC_BATT_ADC_ACCCHRG_MASK) {
-                       pbi->batt_status = POWER_SUPPLY_STATUS_DISCHARGING;
-               } else {
-                       pbi->batt_status = POWER_SUPPLY_STATUS_CHARGING;
-               }
-       }
-
-       /* set batt_charge_rate */
-       if (pbi->is_dev_info_updated && batt_present && !batt_exception) {
-               if (pbi->batt_status == POWER_SUPPLY_STATUS_DISCHARGING) {
-                       if (pbi->batt_charge_now - chrg_val) {
-                               pbi->batt_charge_rate = ((pbi->batt_charge_now -
-                                       chrg_val) * 1000 * 60) /
-                                       update_time_intrvl;
-                       }
-               } else if (pbi->batt_status == POWER_SUPPLY_STATUS_CHARGING) {
-                       if (chrg_val - pbi->batt_charge_now) {
-                               pbi->batt_charge_rate = ((chrg_val -
-                                       pbi->batt_charge_now) * 1000 * 60) /
-                                       update_time_intrvl;
-                       }
-               } else
-                       pbi->batt_charge_rate = 0;
-       } else {
-               pbi->batt_charge_rate = -1;
-       }
-
-       /* batt_charge_now */
-       if (batt_present && !batt_exception)
-               pbi->batt_charge_now = chrg_val;
-       else
-               pbi->batt_charge_now = -1;
-
-       pbi->is_dev_info_updated = PMIC_BATT_DRV_INFO_UPDATED;
-}
-
-/**
- * pmic_usb_get_property - usb power source get property
- * @psy: usb power supply context
- * @psp: usb power source property
- * @val: usb power source property value
- * Context: can sleep
- *
- * PMIC usb power source property needs to be provided to power_supply
- * subsytem for it to provide the information to users.
- */
-static int pmic_usb_get_property(struct power_supply *psy,
-                               enum power_supply_property psp,
-                               union power_supply_propval *val)
-{
-       struct pmic_power_module_info *pbi = power_supply_get_drvdata(psy);
-
-       /* update pmic_power_module_info members */
-       pmic_battery_read_status(pbi);
-
-       switch (psp) {
-       case POWER_SUPPLY_PROP_PRESENT:
-               val->intval = pbi->usb_is_present;
-               break;
-       case POWER_SUPPLY_PROP_HEALTH:
-               val->intval = pbi->usb_health;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static inline unsigned long mAStouAh(unsigned long v)
-{
-       /* seconds to hours, mA to ÂµA */
-       return (v * 1000) / 3600;
-}
-
-/**
- * pmic_battery_get_property - battery power source get property
- * @psy: battery power supply context
- * @psp: battery power source property
- * @val: battery power source property value
- * Context: can sleep
- *
- * PMIC battery power source property needs to be provided to power_supply
- * subsytem for it to provide the information to users.
- */
-static int pmic_battery_get_property(struct power_supply *psy,
-                               enum power_supply_property psp,
-                               union power_supply_propval *val)
-{
-       struct pmic_power_module_info *pbi = power_supply_get_drvdata(psy);
-
-       /* update pmic_power_module_info members */
-       pmic_battery_read_status(pbi);
-
-       switch (psp) {
-       case POWER_SUPPLY_PROP_STATUS:
-               val->intval = pbi->batt_status;
-               break;
-       case POWER_SUPPLY_PROP_HEALTH:
-               val->intval = pbi->batt_health;
-               break;
-       case POWER_SUPPLY_PROP_PRESENT:
-               val->intval = pbi->batt_is_present;
-               break;
-       case POWER_SUPPLY_PROP_CHARGE_NOW:
-               val->intval = mAStouAh(pbi->batt_charge_now);
-               break;
-       case POWER_SUPPLY_PROP_CHARGE_FULL:
-               val->intval = mAStouAh(pbi->batt_prev_charge_full);
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-/**
- * pmic_battery_monitor - monitor battery status
- * @work: work structure
- * Context: can sleep
- *
- * PMIC battery status needs to be monitored for any change
- * and information needs to be frequently updated.
- */
-static void pmic_battery_monitor(struct work_struct *work)
-{
-       struct pmic_power_module_info *pbi = container_of(work,
-                       struct pmic_power_module_info, monitor_battery.work);
-
-       /* update pmic_power_module_info members */
-       pmic_battery_read_status(pbi);
-       queue_delayed_work(pbi->monitor_wqueue, &pbi->monitor_battery, HZ * 10);
-}
-
-/**
- * pmic_battery_set_charger - set battery charger
- * @pbi: device info structure
- * @chrg: charge mode to set battery charger in
- * Context: can sleep
- *
- * PMIC battery charger needs to be enabled based on the usb charge
- * capabilities connected to the platform.
- */
-static int pmic_battery_set_charger(struct pmic_power_module_info *pbi,
-                                               enum batt_charge_type chrg)
-{
-       int retval;
-
-       /* set usblmt bits and chrgcntl register bits appropriately */
-       switch (chrg) {
-       case BATT_USBOTG_500MA_CHARGE:
-               retval = pmic_scu_ipc_set_charger(PMIC_BATT_CHR_IPC_FCHRG_SUBID);
-               break;
-       case BATT_USBOTG_TRICKLE_CHARGE:
-               retval = pmic_scu_ipc_set_charger(PMIC_BATT_CHR_IPC_TCHRG_SUBID);
-               break;
-       default:
-               dev_warn(pbi->dev, "%s(): out of range usb charger "
-                                               "charge detected\n", __func__);
-               return -EINVAL;
-       }
-
-       if (retval) {
-               dev_warn(pbi->dev, "%s(): ipc pmic read failed\n",
-                                                               __func__);
-               return retval;
-       }
-
-       return 0;
-}
-
-/**
- * pmic_battery_interrupt_handler - pmic battery interrupt handler
- * Context: interrupt context
- *
- * PMIC battery interrupt handler which will be called with either
- * battery full condition occurs or usb otg & battery connect
- * condition occurs.
- */
-static irqreturn_t pmic_battery_interrupt_handler(int id, void *dev)
-{
-       struct pmic_power_module_info *pbi = dev;
-
-       schedule_work(&pbi->handler);
-
-       return IRQ_HANDLED;
-}
-
-/**
- * pmic_battery_handle_intrpt - pmic battery service interrupt
- * @work: work structure
- * Context: can sleep
- *
- * PMIC battery needs to either update the battery status as full
- * if it detects battery full condition caused the interrupt or needs
- * to enable battery charger if it detects usb and battery detect
- * caused the source of interrupt.
- */
-static void pmic_battery_handle_intrpt(struct work_struct *work)
-{
-       struct pmic_power_module_info *pbi = container_of(work,
-                               struct pmic_power_module_info, handler);
-       enum batt_charge_type chrg;
-       u8 r8;
-
-       if (intel_scu_ipc_ioread8(PMIC_BATT_CHR_SCHRGINT_ADDR, &r8)) {
-               dev_warn(pbi->dev, "%s(): ipc pmic read failed\n",
-                                                               __func__);
-               return;
-       }
-       /* find the cause of the interrupt */
-       if (r8 & PMIC_BATT_CHR_SBATDET_MASK) {
-               pbi->batt_is_present = PMIC_BATT_PRESENT;
-       } else {
-               pbi->batt_is_present = PMIC_BATT_NOT_PRESENT;
-               pbi->batt_health = POWER_SUPPLY_HEALTH_UNKNOWN;
-               pbi->batt_status = POWER_SUPPLY_STATUS_UNKNOWN;
-               return;
-       }
-
-       if (r8 & PMIC_BATT_CHR_EXCPT_MASK) {
-               pbi->batt_health = POWER_SUPPLY_HEALTH_UNKNOWN;
-               pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
-               pbi->usb_health = POWER_SUPPLY_HEALTH_UNKNOWN;
-               pmic_battery_log_event(BATT_EVENT_EXCPT);
-               return;
-       } else {
-               pbi->batt_health = POWER_SUPPLY_HEALTH_GOOD;
-               pbi->usb_health = POWER_SUPPLY_HEALTH_GOOD;
-       }
-
-       if (r8 & PMIC_BATT_CHR_SCOMP_MASK) {
-               u32 ccval;
-               pbi->batt_status = POWER_SUPPLY_STATUS_FULL;
-
-               if (pmic_scu_ipc_battery_cc_read(&ccval)) {
-                       dev_warn(pbi->dev, "%s(): ipc config cmd "
-                                                       "failed\n", __func__);
-                       return;
-               }
-               pbi->batt_prev_charge_full = ccval &
-                                               PMIC_BATT_ADC_ACCCHRGVAL_MASK;
-               return;
-       }
-
-       if (r8 & PMIC_BATT_CHR_SUSBDET_MASK) {
-               pbi->usb_is_present = PMIC_USB_PRESENT;
-       } else {
-               pbi->usb_is_present = PMIC_USB_NOT_PRESENT;
-               pbi->usb_health = POWER_SUPPLY_HEALTH_UNKNOWN;
-               return;
-       }
-
-       /* setup battery charging */
-
-#if 0
-       /* check usb otg power capability and set charger accordingly */
-       retval = langwell_udc_maxpower(&power);
-       if (retval) {
-               dev_warn(pbi->dev,
-                   "%s(): usb otg power query failed with error code %d\n",
-                       __func__, retval);
-               return;
-       }
-
-       if (power >= 500)
-               chrg = BATT_USBOTG_500MA_CHARGE;
-       else
-#endif
-               chrg = BATT_USBOTG_TRICKLE_CHARGE;
-
-       /* enable battery charging */
-       if (pmic_battery_set_charger(pbi, chrg)) {
-               dev_warn(pbi->dev,
-                       "%s(): failed to set up battery charging\n", __func__);
-               return;
-       }
-
-       dev_dbg(pbi->dev,
-               "pmic-battery: %s() - setting up battery charger successful\n",
-                       __func__);
-}
-
-/*
- * Description of power supplies
- */
-static const struct power_supply_desc pmic_usb_desc = {
-       .name           = "pmic-usb",
-       .type           = POWER_SUPPLY_TYPE_USB,
-       .properties     = pmic_usb_props,
-       .num_properties = ARRAY_SIZE(pmic_usb_props),
-       .get_property   = pmic_usb_get_property,
-};
-
-static const struct power_supply_desc pmic_batt_desc = {
-       .name           = "pmic-batt",
-       .type           = POWER_SUPPLY_TYPE_BATTERY,
-       .properties     = pmic_battery_props,
-       .num_properties = ARRAY_SIZE(pmic_battery_props),
-       .get_property   = pmic_battery_get_property,
-};
-
-/**
- * pmic_battery_probe - pmic battery initialize
- * @irq: pmic battery device irq
- * @dev: pmic battery device structure
- * Context: can sleep
- *
- * PMIC battery initializes its internal data structue and other
- * infrastructure components for it to work as expected.
- */
-static int probe(int irq, struct device *dev)
-{
-       int retval = 0;
-       struct pmic_power_module_info *pbi;
-       struct power_supply_config psy_cfg = {};
-
-       dev_dbg(dev, "pmic-battery: found pmic battery device\n");
-
-       pbi = kzalloc(sizeof(*pbi), GFP_KERNEL);
-       if (!pbi) {
-               dev_err(dev, "%s(): memory allocation failed\n",
-                                                               __func__);
-               return -ENOMEM;
-       }
-
-       pbi->dev = dev;
-       pbi->irq = irq;
-       dev_set_drvdata(dev, pbi);
-       psy_cfg.drv_data = pbi;
-
-       /* initialize all required framework before enabling interrupts */
-       INIT_WORK(&pbi->handler, pmic_battery_handle_intrpt);
-       INIT_DELAYED_WORK(&pbi->monitor_battery, pmic_battery_monitor);
-       pbi->monitor_wqueue = alloc_workqueue(dev_name(dev), WQ_MEM_RECLAIM, 0);
-       if (!pbi->monitor_wqueue) {
-               dev_err(dev, "%s(): wqueue init failed\n", __func__);
-               retval = -ESRCH;
-               goto wqueue_failed;
-       }
-
-       /* register interrupt */
-       retval = request_irq(pbi->irq, pmic_battery_interrupt_handler,
-                                                       0, DRIVER_NAME, pbi);
-       if (retval) {
-               dev_err(dev, "%s(): cannot get IRQ\n", __func__);
-               goto requestirq_failed;
-       }
-
-       /* register pmic-batt with power supply subsystem */
-       pbi->batt = power_supply_register(dev, &pmic_usb_desc, &psy_cfg);
-       if (IS_ERR(pbi->batt)) {
-               dev_err(dev,
-                       "%s(): failed to register pmic battery device with power supply subsystem\n",
-                               __func__);
-               retval = PTR_ERR(pbi->batt);
-               goto power_reg_failed;
-       }
-
-       dev_dbg(dev, "pmic-battery: %s() - pmic battery device "
-               "registration with power supply subsystem successful\n",
-               __func__);
-
-       queue_delayed_work(pbi->monitor_wqueue, &pbi->monitor_battery, HZ * 1);
-
-       /* register pmic-usb with power supply subsystem */
-       pbi->usb = power_supply_register(dev, &pmic_batt_desc, &psy_cfg);
-       if (IS_ERR(pbi->usb)) {
-               dev_err(dev,
-                       "%s(): failed to register pmic usb device with power supply subsystem\n",
-                               __func__);
-               retval = PTR_ERR(pbi->usb);
-               goto power_reg_failed_1;
-       }
-
-       if (debug)
-               printk(KERN_INFO "pmic-battery: %s() - pmic usb device "
-                       "registration with power supply subsystem successful\n",
-                       __func__);
-
-       return retval;
-
-power_reg_failed_1:
-       power_supply_unregister(pbi->batt);
-power_reg_failed:
-       cancel_delayed_work_sync(&pbi->monitor_battery);
-requestirq_failed:
-       destroy_workqueue(pbi->monitor_wqueue);
-wqueue_failed:
-       kfree(pbi);
-
-       return retval;
-}
-
-static int platform_pmic_battery_probe(struct platform_device *pdev)
-{
-       return probe(pdev->id, &pdev->dev);
-}
-
-/**
- * pmic_battery_remove - pmic battery finalize
- * @dev: pmic battery device structure
- * Context: can sleep
- *
- * PMIC battery finalizes its internal data structue and other
- * infrastructure components that it initialized in
- * pmic_battery_probe.
- */
-
-static int platform_pmic_battery_remove(struct platform_device *pdev)
-{
-       struct pmic_power_module_info *pbi = platform_get_drvdata(pdev);
-
-       free_irq(pbi->irq, pbi);
-       cancel_delayed_work_sync(&pbi->monitor_battery);
-       destroy_workqueue(pbi->monitor_wqueue);
-
-       power_supply_unregister(pbi->usb);
-       power_supply_unregister(pbi->batt);
-
-       cancel_work_sync(&pbi->handler);
-       kfree(pbi);
-       return 0;
-}
-
-static struct platform_driver platform_pmic_battery_driver = {
-       .driver = {
-               .name = DRIVER_NAME,
-       },
-       .probe = platform_pmic_battery_probe,
-       .remove = platform_pmic_battery_remove,
-};
-
-module_platform_driver(platform_pmic_battery_driver);
-
-MODULE_AUTHOR("Nithish Mahalingam <nithish.mahalingam@intel.com>");
-MODULE_DESCRIPTION("Intel Moorestown PMIC Battery Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/max14656_charger_detector.c b/drivers/power/supply/max14656_charger_detector.c
new file mode 100644 (file)
index 0000000..b91b1d2
--- /dev/null
@@ -0,0 +1,327 @@
+/*
+ * Maxim MAX14656 / AL32 USB Charger Detector driver
+ *
+ * Copyright (C) 2014 LG Electronics, Inc
+ * Copyright (C) 2016 Alexander Kurz <akurz@blala.de>
+ *
+ * Components from Maxim AL32 Charger detection Driver for MX50 Yoshi Board
+ * Copyright (C) Amazon Technologies Inc. All rights reserved.
+ * Manish Lachwani (lachwani@lab126.com)
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+#include <linux/workqueue.h>
+#include <linux/power_supply.h>
+
+#define MAX14656_MANUFACTURER  "Maxim Integrated"
+#define MAX14656_NAME          "max14656"
+
+#define MAX14656_DEVICE_ID     0x00
+#define MAX14656_INTERRUPT_1   0x01
+#define MAX14656_INTERRUPT_2   0x02
+#define MAX14656_STATUS_1      0x03
+#define MAX14656_STATUS_2      0x04
+#define MAX14656_INTMASK_1     0x05
+#define MAX14656_INTMASK_2     0x06
+#define MAX14656_CONTROL_1     0x07
+#define MAX14656_CONTROL_2     0x08
+#define MAX14656_CONTROL_3     0x09
+
+#define DEVICE_VENDOR_MASK     0xf0
+#define DEVICE_REV_MASK                0x0f
+#define INT_EN_REG_MASK                BIT(4)
+#define CHG_TYPE_INT_MASK      BIT(0)
+#define STATUS1_VB_VALID_MASK  BIT(4)
+#define STATUS1_CHG_TYPE_MASK  0xf
+#define INT1_DCD_TIMEOUT_MASK  BIT(7)
+#define CONTROL1_DEFAULT       0x0d
+#define CONTROL1_INT_EN                BIT(4)
+#define CONTROL1_INT_ACTIVE_HIGH       BIT(5)
+#define CONTROL1_EDGE          BIT(7)
+#define CONTROL2_DEFAULT       0x8e
+#define CONTROL2_ADC_EN                BIT(0)
+#define CONTROL3_DEFAULT       0x8d
+
+enum max14656_chg_type {
+       MAX14656_NO_CHARGER     = 0,
+       MAX14656_SDP_CHARGER,
+       MAX14656_CDP_CHARGER,
+       MAX14656_DCP_CHARGER,
+       MAX14656_APPLE_500MA_CHARGER,
+       MAX14656_APPLE_1A_CHARGER,
+       MAX14656_APPLE_2A_CHARGER,
+       MAX14656_SPECIAL_500MA_CHARGER,
+       MAX14656_APPLE_12W,
+       MAX14656_CHARGER_LAST
+};
+
+static const struct max14656_chg_type_props {
+       enum power_supply_type type;
+} chg_type_props[] = {
+       { POWER_SUPPLY_TYPE_UNKNOWN },
+       { POWER_SUPPLY_TYPE_USB },
+       { POWER_SUPPLY_TYPE_USB_CDP },
+       { POWER_SUPPLY_TYPE_USB_DCP },
+       { POWER_SUPPLY_TYPE_USB_DCP },
+       { POWER_SUPPLY_TYPE_USB_DCP },
+       { POWER_SUPPLY_TYPE_USB_DCP },
+       { POWER_SUPPLY_TYPE_USB_DCP },
+       { POWER_SUPPLY_TYPE_USB },
+};
+
+struct max14656_chip {
+       struct i2c_client       *client;
+       struct power_supply     *detect_psy;
+       struct power_supply_desc psy_desc;
+       struct delayed_work     irq_work;
+
+       int irq;
+       int online;
+};
+
+static int max14656_read_reg(struct i2c_client *client, int reg, u8 *val)
+{
+       s32 ret;
+
+       ret = i2c_smbus_read_byte_data(client, reg);
+       if (ret < 0) {
+               dev_err(&client->dev,
+                       "i2c read fail: can't read from %02x: %d\n",
+                       reg, ret);
+               return ret;
+       }
+       *val = ret;
+       return 0;
+}
+
+static int max14656_write_reg(struct i2c_client *client, int reg, u8 val)
+{
+       s32 ret;
+
+       ret = i2c_smbus_write_byte_data(client, reg, val);
+       if (ret < 0) {
+               dev_err(&client->dev,
+                       "i2c write fail: can't write %02x to %02x: %d\n",
+                       val, reg, ret);
+               return ret;
+       }
+       return 0;
+}
+
+static int max14656_read_block_reg(struct i2c_client *client, u8 reg,
+                                 u8 length, u8 *val)
+{
+       int ret;
+
+       ret = i2c_smbus_read_i2c_block_data(client, reg, length, val);
+       if (ret < 0) {
+               dev_err(&client->dev, "failed to block read reg 0x%x: %d\n",
+                               reg, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+#define        REG_TOTAL_NUM   5
+static void max14656_irq_worker(struct work_struct *work)
+{
+       struct max14656_chip *chip =
+               container_of(work, struct max14656_chip, irq_work.work);
+
+       u8 buf[REG_TOTAL_NUM];
+       u8 chg_type;
+       int ret = 0;
+
+       ret = max14656_read_block_reg(chip->client, MAX14656_DEVICE_ID,
+                                     REG_TOTAL_NUM, buf);
+
+       if ((buf[MAX14656_STATUS_1] & STATUS1_VB_VALID_MASK) &&
+               (buf[MAX14656_STATUS_1] & STATUS1_CHG_TYPE_MASK)) {
+               chg_type = buf[MAX14656_STATUS_1] & STATUS1_CHG_TYPE_MASK;
+               if (chg_type < MAX14656_CHARGER_LAST)
+                       chip->psy_desc.type = chg_type_props[chg_type].type;
+               else
+                       chip->psy_desc.type = POWER_SUPPLY_TYPE_UNKNOWN;
+               chip->online = 1;
+       } else {
+               chip->online = 0;
+               chip->psy_desc.type = POWER_SUPPLY_TYPE_UNKNOWN;
+       }
+
+       power_supply_changed(chip->detect_psy);
+}
+
+static irqreturn_t max14656_irq(int irq, void *dev_id)
+{
+       struct max14656_chip *chip = dev_id;
+
+       schedule_delayed_work(&chip->irq_work, msecs_to_jiffies(100));
+
+       return IRQ_HANDLED;
+}
+
+static int max14656_hw_init(struct max14656_chip *chip)
+{
+       uint8_t val = 0;
+       uint8_t rev;
+       struct i2c_client *client = chip->client;
+
+       if (max14656_read_reg(client, MAX14656_DEVICE_ID, &val))
+               return -ENODEV;
+
+       if ((val & DEVICE_VENDOR_MASK) != 0x20) {
+               dev_err(&client->dev, "wrong vendor ID %d\n",
+                       ((val & DEVICE_VENDOR_MASK) >> 4));
+               return -ENODEV;
+       }
+       rev = val & DEVICE_REV_MASK;
+
+       /* Turn on ADC_EN */
+       if (max14656_write_reg(client, MAX14656_CONTROL_2, CONTROL2_ADC_EN))
+               return -EINVAL;
+
+       /* turn on interrupts and low power mode */
+       if (max14656_write_reg(client, MAX14656_CONTROL_1,
+               CONTROL1_DEFAULT |
+               CONTROL1_INT_EN |
+               CONTROL1_INT_ACTIVE_HIGH |
+               CONTROL1_EDGE))
+               return -EINVAL;
+
+       if (max14656_write_reg(client, MAX14656_INTMASK_1, 0x3))
+               return -EINVAL;
+
+       if (max14656_write_reg(client, MAX14656_INTMASK_2, 0x1))
+               return -EINVAL;
+
+       dev_info(&client->dev, "detected revision %d\n", rev);
+       return 0;
+}
+
+static int max14656_get_property(struct power_supply *psy,
+                           enum power_supply_property psp,
+                           union power_supply_propval *val)
+{
+       struct max14656_chip *chip = power_supply_get_drvdata(psy);
+
+       switch (psp) {
+       case POWER_SUPPLY_PROP_ONLINE:
+               val->intval = chip->online;
+               break;
+       case POWER_SUPPLY_PROP_MODEL_NAME:
+               val->strval = MAX14656_NAME;
+               break;
+       case POWER_SUPPLY_PROP_MANUFACTURER:
+               val->strval = MAX14656_MANUFACTURER;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static enum power_supply_property max14656_battery_props[] = {
+       POWER_SUPPLY_PROP_ONLINE,
+       POWER_SUPPLY_PROP_MODEL_NAME,
+       POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static int max14656_probe(struct i2c_client *client,
+                         const struct i2c_device_id *id)
+{
+       struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+       struct device *dev = &client->dev;
+       struct power_supply_config psy_cfg = {};
+       struct max14656_chip *chip;
+       int irq = client->irq;
+       int ret = 0;
+
+       if (irq <= 0) {
+               dev_err(dev, "invalid irq number: %d\n", irq);
+               return -ENODEV;
+       }
+
+       if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
+               dev_err(dev, "No support for SMBUS_BYTE_DATA\n");
+               return -ENODEV;
+       }
+
+       chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+       if (!chip)
+               return -ENOMEM;
+
+       psy_cfg.drv_data = chip;
+       chip->client = client;
+       chip->online = 0;
+       chip->psy_desc.name = MAX14656_NAME;
+       chip->psy_desc.type = POWER_SUPPLY_TYPE_UNKNOWN;
+       chip->psy_desc.properties = max14656_battery_props;
+       chip->psy_desc.num_properties = ARRAY_SIZE(max14656_battery_props);
+       chip->psy_desc.get_property = max14656_get_property;
+       chip->irq = irq;
+
+       ret = max14656_hw_init(chip);
+       if (ret)
+               return -ENODEV;
+
+       INIT_DELAYED_WORK(&chip->irq_work, max14656_irq_worker);
+
+       ret = devm_request_irq(dev, chip->irq, max14656_irq,
+                              IRQF_TRIGGER_FALLING,
+                              MAX14656_NAME, chip);
+       if (ret) {
+               dev_err(dev, "request_irq %d failed\n", chip->irq);
+               return -EINVAL;
+       }
+       enable_irq_wake(chip->irq);
+
+       chip->detect_psy = devm_power_supply_register(dev,
+                      &chip->psy_desc, &psy_cfg);
+       if (IS_ERR(chip->detect_psy)) {
+               dev_err(dev, "power_supply_register failed\n");
+               return -EINVAL;
+       }
+
+       schedule_delayed_work(&chip->irq_work, msecs_to_jiffies(2000));
+
+       return 0;
+}
+
+static const struct i2c_device_id max14656_id[] = {
+       { "max14656", 0 },
+       {}
+};
+MODULE_DEVICE_TABLE(i2c, max14656_id);
+
+static const struct of_device_id max14656_match_table[] = {
+       { .compatible = "maxim,max14656", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, max14656_match_table);
+
+static struct i2c_driver max14656_i2c_driver = {
+       .driver = {
+               .name   = "max14656",
+               .of_match_table = max14656_match_table,
+       },
+       .probe          = max14656_probe,
+       .id_table       = max14656_id,
+};
+module_i2c_driver(max14656_i2c_driver);
+
+MODULE_DESCRIPTION("MAX14656 USB charger detector");
+MODULE_LICENSE("GPL v2");
index 290ddc12b0405218865edf427cb5e87ad67e785d..fa861003fece2a5cad7dcdbddf293a8faf2e2cba 100644 (file)
@@ -148,10 +148,8 @@ static int max8997_battery_probe(struct platform_device *pdev)
 
        charger = devm_kzalloc(&pdev->dev, sizeof(struct charger_data),
                                GFP_KERNEL);
-       if (charger == NULL) {
-               dev_err(&pdev->dev, "Cannot allocate memory.\n");
+       if (!charger)
                return -ENOMEM;
-       }
 
        platform_set_drvdata(pdev, charger);
 
@@ -161,7 +159,7 @@ static int max8997_battery_probe(struct platform_device *pdev)
 
        psy_cfg.drv_data = charger;
 
-       charger->battery = power_supply_register(&pdev->dev,
+       charger->battery = devm_power_supply_register(&pdev->dev,
                                                 &max8997_battery_desc,
                                                 &psy_cfg);
        if (IS_ERR(charger->battery)) {
@@ -172,14 +170,6 @@ static int max8997_battery_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int max8997_battery_remove(struct platform_device *pdev)
-{
-       struct charger_data *charger = platform_get_drvdata(pdev);
-
-       power_supply_unregister(charger->battery);
-       return 0;
-}
-
 static const struct platform_device_id max8997_battery_id[] = {
        { "max8997-battery", 0 },
        { }
@@ -191,7 +181,6 @@ static struct platform_driver max8997_battery_driver = {
                .name = "max8997-battery",
        },
        .probe = max8997_battery_probe,
-       .remove = max8997_battery_remove,
        .id_table = max8997_battery_id,
 };
 
index d05597b4e40f5a613271f8bf0021560508e15997..b3c1873ad84db529888dec73596acabd4206d6c5 100644 (file)
@@ -393,7 +393,6 @@ static int pcf50633_mbc_probe(struct platform_device *pdev)
 {
        struct power_supply_config psy_cfg = {};
        struct pcf50633_mbc *mbc;
-       int ret;
        int i;
        u8 mbcs1;
 
@@ -419,8 +418,7 @@ static int pcf50633_mbc_probe(struct platform_device *pdev)
                                             &psy_cfg);
        if (IS_ERR(mbc->adapter)) {
                dev_err(mbc->pcf->dev, "failed to register adapter\n");
-               ret = PTR_ERR(mbc->adapter);
-               return ret;
+               return PTR_ERR(mbc->adapter);
        }
 
        mbc->usb = power_supply_register(&pdev->dev, &pcf50633_mbc_usb_desc,
@@ -428,8 +426,7 @@ static int pcf50633_mbc_probe(struct platform_device *pdev)
        if (IS_ERR(mbc->usb)) {
                dev_err(mbc->pcf->dev, "failed to register usb\n");
                power_supply_unregister(mbc->adapter);
-               ret = PTR_ERR(mbc->usb);
-               return ret;
+               return PTR_ERR(mbc->usb);
        }
 
        mbc->ac = power_supply_register(&pdev->dev, &pcf50633_mbc_ac_desc,
@@ -438,12 +435,10 @@ static int pcf50633_mbc_probe(struct platform_device *pdev)
                dev_err(mbc->pcf->dev, "failed to register ac\n");
                power_supply_unregister(mbc->adapter);
                power_supply_unregister(mbc->usb);
-               ret = PTR_ERR(mbc->ac);
-               return ret;
+               return PTR_ERR(mbc->ac);
        }
 
-       ret = sysfs_create_group(&pdev->dev.kobj, &mbc_attr_group);
-       if (ret)
+       if (sysfs_create_group(&pdev->dev.kobj, &mbc_attr_group))
                dev_err(mbc->pcf->dev, "failed to create sysfs entries\n");
 
        mbcs1 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS1);
index b5896ba2a60262b4531d8cc11e8fbe30b33b70d0..f6a0d245731dd72d2aa7dd45ea157593238380ce 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/regmap.h>
 #include <linux/slab.h>
 #include <linux/extcon.h>
+#include <linux/regulator/driver.h>
 
 #define SMBB_CHG_VMAX          0x040
 #define SMBB_CHG_VSAFE         0x041
@@ -72,6 +73,8 @@
 #define BTC_CTRL_HOT_EXT_N     BIT(0)
 
 #define SMBB_USB_IMAX          0x344
+#define SMBB_USB_OTG_CTL       0x348
+#define OTG_CTL_EN             BIT(0)
 #define SMBB_USB_ENUM_TIMER_STOP 0x34e
 #define ENUM_TIMER_STOP                BIT(0)
 #define SMBB_USB_SEC_ACCESS    0x3d0
@@ -125,6 +128,9 @@ struct smbb_charger {
        struct power_supply *dc_psy;
        struct power_supply *bat_psy;
        struct regmap *regmap;
+
+       struct regulator_desc otg_rdesc;
+       struct regulator_dev *otg_reg;
 };
 
 static const unsigned int smbb_usb_extcon_cable[] = {
@@ -378,7 +384,7 @@ static irqreturn_t smbb_usb_valid_handler(int irq, void *_data)
        struct smbb_charger *chg = _data;
 
        smbb_set_line_flag(chg, irq, STATUS_USBIN_VALID);
-       extcon_set_cable_state_(chg->edev, EXTCON_USB,
+       extcon_set_state_sync(chg->edev, EXTCON_USB,
                                chg->status & STATUS_USBIN_VALID);
        power_supply_changed(chg->usb_psy);
 
@@ -787,12 +793,56 @@ static const struct power_supply_desc dc_psy_desc = {
        .property_is_writeable = smbb_charger_writable_property,
 };
 
+static int smbb_chg_otg_enable(struct regulator_dev *rdev)
+{
+       struct smbb_charger *chg = rdev_get_drvdata(rdev);
+       int rc;
+
+       rc = regmap_update_bits(chg->regmap, chg->addr + SMBB_USB_OTG_CTL,
+                               OTG_CTL_EN, OTG_CTL_EN);
+       if (rc)
+               dev_err(chg->dev, "failed to update OTG_CTL\n");
+       return rc;
+}
+
+static int smbb_chg_otg_disable(struct regulator_dev *rdev)
+{
+       struct smbb_charger *chg = rdev_get_drvdata(rdev);
+       int rc;
+
+       rc = regmap_update_bits(chg->regmap, chg->addr + SMBB_USB_OTG_CTL,
+                               OTG_CTL_EN, 0);
+       if (rc)
+               dev_err(chg->dev, "failed to update OTG_CTL\n");
+       return rc;
+}
+
+static int smbb_chg_otg_is_enabled(struct regulator_dev *rdev)
+{
+       struct smbb_charger *chg = rdev_get_drvdata(rdev);
+       unsigned int value = 0;
+       int rc;
+
+       rc = regmap_read(chg->regmap, chg->addr + SMBB_USB_OTG_CTL, &value);
+       if (rc)
+               dev_err(chg->dev, "failed to read OTG_CTL\n");
+
+       return !!(value & OTG_CTL_EN);
+}
+
+static const struct regulator_ops smbb_chg_otg_ops = {
+       .enable = smbb_chg_otg_enable,
+       .disable = smbb_chg_otg_disable,
+       .is_enabled = smbb_chg_otg_is_enabled,
+};
+
 static int smbb_charger_probe(struct platform_device *pdev)
 {
        struct power_supply_config bat_cfg = {};
        struct power_supply_config usb_cfg = {};
        struct power_supply_config dc_cfg = {};
        struct smbb_charger *chg;
+       struct regulator_config config = { };
        int rc, i;
 
        chg = devm_kzalloc(&pdev->dev, sizeof(*chg), GFP_KERNEL);
@@ -905,6 +955,26 @@ static int smbb_charger_probe(struct platform_device *pdev)
                }
        }
 
+       /*
+        * otg regulator is used to control VBUS voltage direction
+        * when USB switches between host and gadget mode
+        */
+       chg->otg_rdesc.id = -1;
+       chg->otg_rdesc.name = "otg-vbus";
+       chg->otg_rdesc.ops = &smbb_chg_otg_ops;
+       chg->otg_rdesc.owner = THIS_MODULE;
+       chg->otg_rdesc.type = REGULATOR_VOLTAGE;
+       chg->otg_rdesc.supply_name = "usb-otg-in";
+       chg->otg_rdesc.of_match = "otg-vbus";
+
+       config.dev = &pdev->dev;
+       config.driver_data = chg;
+
+       chg->otg_reg = devm_regulator_register(&pdev->dev, &chg->otg_rdesc,
+                                              &config);
+       if (IS_ERR(chg->otg_reg))
+               return PTR_ERR(chg->otg_reg);
+
        chg->jeita_ext_temp = of_property_read_bool(pdev->dev.of_node,
                        "qcom,jeita-extended-temp-range");
 
diff --git a/drivers/power/supply/sbs-charger.c b/drivers/power/supply/sbs-charger.c
new file mode 100644 (file)
index 0000000..353765a
--- /dev/null
@@ -0,0 +1,274 @@
+/*
+ * Copyright (c) 2016, Prodys S.L.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This adds support for sbs-charger compliant chips as defined here:
+ * http://sbs-forum.org/specs/sbc110.pdf
+ *
+ * Implementation based on sbs-battery.c
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/power_supply.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/regmap.h>
+#include <linux/of_gpio.h>
+#include <linux/bitops.h>
+
+#define SBS_CHARGER_REG_SPEC_INFO              0x11
+#define SBS_CHARGER_REG_STATUS                 0x13
+#define SBS_CHARGER_REG_ALARM_WARNING          0x16
+
+#define SBS_CHARGER_STATUS_CHARGE_INHIBITED    BIT(1)
+#define SBS_CHARGER_STATUS_RES_COLD            BIT(9)
+#define SBS_CHARGER_STATUS_RES_HOT             BIT(10)
+#define SBS_CHARGER_STATUS_BATTERY_PRESENT     BIT(14)
+#define SBS_CHARGER_STATUS_AC_PRESENT          BIT(15)
+
+#define SBS_CHARGER_POLL_TIME                  500
+
+struct sbs_info {
+       struct i2c_client               *client;
+       struct power_supply             *power_supply;
+       struct regmap                   *regmap;
+       struct delayed_work             work;
+       unsigned int                    last_state;
+};
+
+static int sbs_get_property(struct power_supply *psy,
+                           enum power_supply_property psp,
+                           union power_supply_propval *val)
+{
+       struct sbs_info *chip = power_supply_get_drvdata(psy);
+       unsigned int reg;
+
+       reg = chip->last_state;
+
+       switch (psp) {
+       case POWER_SUPPLY_PROP_PRESENT:
+               val->intval = !!(reg & SBS_CHARGER_STATUS_BATTERY_PRESENT);
+               break;
+
+       case POWER_SUPPLY_PROP_ONLINE:
+               val->intval = !!(reg & SBS_CHARGER_STATUS_AC_PRESENT);
+               break;
+
+       case POWER_SUPPLY_PROP_STATUS:
+               val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+
+               if (!(reg & SBS_CHARGER_STATUS_BATTERY_PRESENT))
+                       val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+               else if (reg & SBS_CHARGER_STATUS_AC_PRESENT &&
+                        !(reg & SBS_CHARGER_STATUS_CHARGE_INHIBITED))
+                       val->intval = POWER_SUPPLY_STATUS_CHARGING;
+               else
+                       val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+
+               break;
+
+       case POWER_SUPPLY_PROP_HEALTH:
+               if (reg & SBS_CHARGER_STATUS_RES_COLD)
+                       val->intval = POWER_SUPPLY_HEALTH_COLD;
+               else if (reg & SBS_CHARGER_STATUS_RES_HOT)
+                       val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+               else
+                       val->intval = POWER_SUPPLY_HEALTH_GOOD;
+
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int sbs_check_state(struct sbs_info *chip)
+{
+       unsigned int reg;
+       int ret;
+
+       ret = regmap_read(chip->regmap, SBS_CHARGER_REG_STATUS, &reg);
+       if (!ret && reg != chip->last_state) {
+               chip->last_state = reg;
+               power_supply_changed(chip->power_supply);
+               return 1;
+       }
+
+       return 0;
+}
+
+static void sbs_delayed_work(struct work_struct *work)
+{
+       struct sbs_info *chip = container_of(work, struct sbs_info, work.work);
+
+       sbs_check_state(chip);
+
+       schedule_delayed_work(&chip->work,
+                             msecs_to_jiffies(SBS_CHARGER_POLL_TIME));
+}
+
+static irqreturn_t sbs_irq_thread(int irq, void *data)
+{
+       struct sbs_info *chip = data;
+       int ret;
+
+       ret = sbs_check_state(chip);
+
+       return ret ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static enum power_supply_property sbs_properties[] = {
+       POWER_SUPPLY_PROP_STATUS,
+       POWER_SUPPLY_PROP_PRESENT,
+       POWER_SUPPLY_PROP_ONLINE,
+       POWER_SUPPLY_PROP_HEALTH,
+};
+
+static bool sbs_readable_reg(struct device *dev, unsigned int reg)
+{
+       if (reg < SBS_CHARGER_REG_SPEC_INFO)
+               return false;
+       else
+               return true;
+}
+
+static bool sbs_volatile_reg(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case SBS_CHARGER_REG_STATUS:
+               return true;
+       }
+
+       return false;
+}
+
+static const struct regmap_config sbs_regmap = {
+       .reg_bits       = 8,
+       .val_bits       = 16,
+       .max_register   = SBS_CHARGER_REG_ALARM_WARNING,
+       .readable_reg   = sbs_readable_reg,
+       .volatile_reg   = sbs_volatile_reg,
+       .val_format_endian = REGMAP_ENDIAN_LITTLE, /* since based on SMBus */
+};
+
+static const struct power_supply_desc sbs_desc = {
+       .name = "sbs-charger",
+       .type = POWER_SUPPLY_TYPE_MAINS,
+       .properties = sbs_properties,
+       .num_properties = ARRAY_SIZE(sbs_properties),
+       .get_property = sbs_get_property,
+};
+
+static int sbs_probe(struct i2c_client *client,
+                    const struct i2c_device_id *id)
+{
+       struct power_supply_config psy_cfg = {};
+       struct sbs_info *chip;
+       int ret, val;
+
+       chip = devm_kzalloc(&client->dev, sizeof(struct sbs_info), GFP_KERNEL);
+       if (!chip)
+               return -ENOMEM;
+
+       chip->client = client;
+       psy_cfg.of_node = client->dev.of_node;
+       psy_cfg.drv_data = chip;
+
+       i2c_set_clientdata(client, chip);
+
+       chip->regmap = devm_regmap_init_i2c(client, &sbs_regmap);
+       if (IS_ERR(chip->regmap))
+               return PTR_ERR(chip->regmap);
+
+       /*
+        * Before we register, we need to make sure we can actually talk
+        * to the battery.
+        */
+       ret = regmap_read(chip->regmap, SBS_CHARGER_REG_STATUS, &val);
+       if (ret) {
+               dev_err(&client->dev, "Failed to get device status\n");
+               return ret;
+       }
+       chip->last_state = val;
+
+       chip->power_supply = devm_power_supply_register(&client->dev, &sbs_desc,
+                                                       &psy_cfg);
+       if (IS_ERR(chip->power_supply)) {
+               dev_err(&client->dev, "Failed to register power supply\n");
+               return PTR_ERR(chip->power_supply);
+       }
+
+       /*
+        * The sbs-charger spec doesn't impose the use of an interrupt. So in
+        * the case it wasn't provided we use polling in order to get the charger's
+        * status.
+        */
+       if (client->irq) {
+               ret = devm_request_threaded_irq(&client->dev, client->irq,
+                                       NULL, sbs_irq_thread,
+                                       IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                                       dev_name(&client->dev), chip);
+               if (ret) {
+                       dev_err(&client->dev, "Failed to request irq, %d\n", ret);
+                       return ret;
+               }
+       } else {
+               INIT_DELAYED_WORK(&chip->work, sbs_delayed_work);
+               schedule_delayed_work(&chip->work,
+                                     msecs_to_jiffies(SBS_CHARGER_POLL_TIME));
+       }
+
+       dev_info(&client->dev,
+                "%s: smart charger device registered\n", client->name);
+
+       return 0;
+}
+
+static int sbs_remove(struct i2c_client *client)
+{
+       struct sbs_info *chip = i2c_get_clientdata(client);
+
+       cancel_delayed_work_sync(&chip->work);
+
+       return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id sbs_dt_ids[] = {
+       { .compatible = "sbs,sbs-charger" },
+       { },
+};
+MODULE_DEVICE_TABLE(of, sbs_dt_ids);
+#endif
+
+static const struct i2c_device_id sbs_id[] = {
+       { "sbs-charger", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, sbs_id);
+
+static struct i2c_driver sbs_driver = {
+       .probe          = sbs_probe,
+       .remove         = sbs_remove,
+       .id_table       = sbs_id,
+       .driver = {
+               .name   = "sbs-charger",
+               .of_match_table = of_match_ptr(sbs_dt_ids),
+       },
+};
+module_i2c_driver(sbs_driver);
+
+MODULE_AUTHOR("Nicolas Saenz Julienne <nicolassaenzj@gmail.com>");
+MODULE_DESCRIPTION("SBS smart charger driver");
+MODULE_LICENSE("GPL v2");
index 9fd019f9b88c4ac1ed2c0988d13486c5f4b26b6f..29b61e81b38541f749982de0c7d8d1d6eddeb353 100644 (file)
 #include <linux/mfd/core.h>
 #include <linux/mfd/tps65217.h>
 
+#define CHARGER_STATUS_PRESENT (TPS65217_STATUS_ACPWR | TPS65217_STATUS_USBPWR)
+#define NUM_CHARGER_IRQS       2
 #define POLL_INTERVAL          (HZ * 2)
 
 struct tps65217_charger {
        struct tps65217 *tps;
        struct device *dev;
-       struct power_supply *ac;
+       struct power_supply *psy;
 
-       int     ac_online;
-       int     prev_ac_online;
+       int     online;
+       int     prev_online;
 
        struct task_struct      *poll_task;
-
-       int     irq;
 };
 
-static enum power_supply_property tps65217_ac_props[] = {
+static enum power_supply_property tps65217_charger_props[] = {
        POWER_SUPPLY_PROP_ONLINE,
 };
 
@@ -95,7 +95,7 @@ static int tps65217_enable_charging(struct tps65217_charger *charger)
        int ret;
 
        /* charger already enabled */
-       if (charger->ac_online)
+       if (charger->online)
                return 0;
 
        dev_dbg(charger->dev, "%s: enable charging\n", __func__);
@@ -110,19 +110,19 @@ static int tps65217_enable_charging(struct tps65217_charger *charger)
                return ret;
        }
 
-       charger->ac_online = 1;
+       charger->online = 1;
 
        return 0;
 }
 
-static int tps65217_ac_get_property(struct power_supply *psy,
-                       enum power_supply_property psp,
-                       union power_supply_propval *val)
+static int tps65217_charger_get_property(struct power_supply *psy,
+                                        enum power_supply_property psp,
+                                        union power_supply_propval *val)
 {
        struct tps65217_charger *charger = power_supply_get_drvdata(psy);
 
        if (psp == POWER_SUPPLY_PROP_ONLINE) {
-               val->intval = charger->ac_online;
+               val->intval = charger->online;
                return 0;
        }
        return -EINVAL;
@@ -133,7 +133,7 @@ static irqreturn_t tps65217_charger_irq(int irq, void *dev)
        int ret, val;
        struct tps65217_charger *charger = dev;
 
-       charger->prev_ac_online = charger->ac_online;
+       charger->prev_online = charger->online;
 
        ret = tps65217_reg_read(charger->tps, TPS65217_REG_STATUS, &val);
        if (ret < 0) {
@@ -144,8 +144,8 @@ static irqreturn_t tps65217_charger_irq(int irq, void *dev)
 
        dev_dbg(charger->dev, "%s: 0x%x\n", __func__, val);
 
-       /* check for AC status bit */
-       if (val & TPS65217_STATUS_ACPWR) {
+       /* check for charger status bit */
+       if (val & CHARGER_STATUS_PRESENT) {
                ret = tps65217_enable_charging(charger);
                if (ret) {
                        dev_err(charger->dev,
@@ -153,11 +153,11 @@ static irqreturn_t tps65217_charger_irq(int irq, void *dev)
                        return IRQ_HANDLED;
                }
        } else {
-               charger->ac_online = 0;
+               charger->online = 0;
        }
 
-       if (charger->prev_ac_online != charger->ac_online)
-               power_supply_changed(charger->ac);
+       if (charger->prev_online != charger->online)
+               power_supply_changed(charger->psy);
 
        ret = tps65217_reg_read(charger->tps, TPS65217_REG_CHGCONFIG0, &val);
        if (ret < 0) {
@@ -188,11 +188,11 @@ static int tps65217_charger_poll_task(void *data)
 }
 
 static const struct power_supply_desc tps65217_charger_desc = {
-       .name                   = "tps65217-ac",
+       .name                   = "tps65217-charger",
        .type                   = POWER_SUPPLY_TYPE_MAINS,
-       .get_property           = tps65217_ac_get_property,
-       .properties             = tps65217_ac_props,
-       .num_properties         = ARRAY_SIZE(tps65217_ac_props),
+       .get_property           = tps65217_charger_get_property,
+       .properties             = tps65217_charger_props,
+       .num_properties         = ARRAY_SIZE(tps65217_charger_props),
 };
 
 static int tps65217_charger_probe(struct platform_device *pdev)
@@ -200,8 +200,10 @@ static int tps65217_charger_probe(struct platform_device *pdev)
        struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent);
        struct tps65217_charger *charger;
        struct power_supply_config cfg = {};
-       int irq;
+       struct task_struct *poll_task;
+       int irq[NUM_CHARGER_IRQS];
        int ret;
+       int i;
 
        dev_dbg(&pdev->dev, "%s\n", __func__);
 
@@ -216,18 +218,16 @@ static int tps65217_charger_probe(struct platform_device *pdev)
        cfg.of_node = pdev->dev.of_node;
        cfg.drv_data = charger;
 
-       charger->ac = devm_power_supply_register(&pdev->dev,
-                                                &tps65217_charger_desc,
-                                                &cfg);
-       if (IS_ERR(charger->ac)) {
+       charger->psy = devm_power_supply_register(&pdev->dev,
+                                                 &tps65217_charger_desc,
+                                                 &cfg);
+       if (IS_ERR(charger->psy)) {
                dev_err(&pdev->dev, "failed: power supply register\n");
-               return PTR_ERR(charger->ac);
+               return PTR_ERR(charger->psy);
        }
 
-       irq = platform_get_irq_byname(pdev, "AC");
-       if (irq < 0)
-               irq = -ENXIO;
-       charger->irq = irq;
+       irq[0] = platform_get_irq_byname(pdev, "USB");
+       irq[1] = platform_get_irq_byname(pdev, "AC");
 
        ret = tps65217_config_charger(charger);
        if (ret < 0) {
@@ -235,29 +235,36 @@ static int tps65217_charger_probe(struct platform_device *pdev)
                return ret;
        }
 
-       if (irq != -ENXIO) {
-               ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+       /* Create a polling thread if an interrupt is invalid */
+       if (irq[0] < 0 || irq[1] < 0) {
+               poll_task = kthread_run(tps65217_charger_poll_task,
+                                       charger, "ktps65217charger");
+               if (IS_ERR(poll_task)) {
+                       ret = PTR_ERR(poll_task);
+                       dev_err(charger->dev,
+                               "Unable to run kthread err %d\n", ret);
+                       return ret;
+               }
+
+               charger->poll_task = poll_task;
+               return 0;
+       }
+
+       /* Create IRQ threads for charger interrupts */
+       for (i = 0; i < NUM_CHARGER_IRQS; i++) {
+               ret = devm_request_threaded_irq(&pdev->dev, irq[i], NULL,
                                                tps65217_charger_irq,
                                                0, "tps65217-charger",
                                                charger);
                if (ret) {
                        dev_err(charger->dev,
-                               "Unable to register irq %d err %d\n", irq,
+                               "Unable to register irq %d err %d\n", irq[i],
                                ret);
                        return ret;
                }
 
                /* Check current state */
-               tps65217_charger_irq(irq, charger);
-       } else {
-               charger->poll_task = kthread_run(tps65217_charger_poll_task,
-                                               charger, "ktps65217charger");
-               if (IS_ERR(charger->poll_task)) {
-                       ret = PTR_ERR(charger->poll_task);
-                       dev_err(charger->dev,
-                               "Unable to run kthread err %d\n", ret);
-                       return ret;
-               }
+               tps65217_charger_irq(-1, charger);
        }
 
        return 0;
@@ -267,7 +274,7 @@ static int tps65217_charger_remove(struct platform_device *pdev)
 {
        struct tps65217_charger *charger = platform_get_drvdata(pdev);
 
-       if (charger->irq == -ENXIO)
+       if (charger->poll_task)
                kthread_stop(charger->poll_task);
 
        return 0;
index e3edb31ac88045a0071c78dff2f20a4a9cac0284..bd4f66651513f8bdcdd7fe419c706acd82058277 100644 (file)
@@ -175,11 +175,6 @@ static int wm97xx_bat_probe(struct platform_device *dev)
        if (dev->id != -1)
                return -EINVAL;
 
-       if (!pdata) {
-               dev_err(&dev->dev, "No platform_data supplied\n");
-               return -EINVAL;
-       }
-
        if (gpio_is_valid(pdata->charge_gpio)) {
                ret = gpio_request(pdata->charge_gpio, "BATT CHRG");
                if (ret)
index a62a89674fb5f6f82b8223067a6e1ff2b62ade21..89bbd6e8bad131961a811831b724cfecce22e754 100644 (file)
@@ -180,7 +180,7 @@ static int pm800_get_current_limit(struct regulator_dev *rdev)
        return info->max_ua;
 }
 
-static struct regulator_ops pm800_volt_range_ops = {
+static const struct regulator_ops pm800_volt_range_ops = {
        .list_voltage           = regulator_list_voltage_linear_range,
        .map_voltage            = regulator_map_voltage_linear_range,
        .set_voltage_sel        = regulator_set_voltage_sel_regmap,
@@ -191,7 +191,7 @@ static struct regulator_ops pm800_volt_range_ops = {
        .get_current_limit      = pm800_get_current_limit,
 };
 
-static struct regulator_ops pm800_volt_table_ops = {
+static const struct regulator_ops pm800_volt_table_ops = {
        .list_voltage           = regulator_list_voltage_table,
        .map_voltage            = regulator_map_voltage_iterate,
        .set_voltage_sel        = regulator_set_voltage_sel_regmap,
index b100a63ff3b3f6398b649129559f8228f66265c2..fd86446e499b23057a4def120b3baa02e1950c2d 100644 (file)
@@ -220,7 +220,7 @@ static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index)
        return ret;
 }
 
-static struct regulator_ops pm8607_regulator_ops = {
+static const struct regulator_ops pm8607_regulator_ops = {
        .list_voltage   = pm8607_list_voltage,
        .set_voltage_sel = regulator_set_voltage_sel_regmap,
        .get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -229,7 +229,7 @@ static struct regulator_ops pm8607_regulator_ops = {
        .is_enabled = regulator_is_enabled_regmap,
 };
 
-static struct regulator_ops pm8606_preg_ops = {
+static const struct regulator_ops pm8606_preg_ops = {
        .enable         = regulator_enable_regmap,
        .disable        = regulator_disable_regmap,
        .is_enabled     = regulator_is_enabled_regmap,
index 936f7ccc9736f8c550f712f6cb9fd8c8622ce29c..be06eb29c6817bd30f5747384e2da7432f87644c 100644 (file)
@@ -163,6 +163,13 @@ config REGULATOR_BCM590XX
          BCM590xx PMUs. This will enable support for the software
          controllable LDO/Switching regulators.
 
+config REGULATOR_CPCAP
+       tristate "Motorola CPCAP regulator"
+       depends on MFD_CPCAP
+       help
+         Say y here for CPCAP regulator found on some Motorola phones
+         and tablets such as Droid 4.
+
 config REGULATOR_DA903X
        tristate "Dialog Semiconductor DA9030/DA9034 regulators"
        depends on PMIC_DA903X
index 14294692beb9d06bdded2eb5ce4c89a0d4a93fd3..ef7725e2592adde95d4fc250f7b626091d9ade7f 100644 (file)
@@ -11,6 +11,7 @@ obj-$(CONFIG_REGULATOR_USERSPACE_CONSUMER) += userspace-consumer.o
 
 obj-$(CONFIG_REGULATOR_88PM800) += 88pm800.o
 obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
+obj-$(CONFIG_REGULATOR_CPCAP) += cpcap-regulator.o
 obj-$(CONFIG_REGULATOR_AAT2870) += aat2870-regulator.o
 obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
 obj-$(CONFIG_REGULATOR_AB8500) += ab8500-ext.o ab8500.o
index 9dfabda8f47826f72cddbf3486461c2b32815312..afc5b5900181c659ac102f2c852d1fe5493f966f 100644 (file)
@@ -97,7 +97,7 @@ static int aat2870_ldo_is_enabled(struct regulator_dev *rdev)
        return val & ri->enable_mask ? 1 : 0;
 }
 
-static struct regulator_ops aat2870_ldo_ops = {
+static const struct regulator_ops aat2870_ldo_ops = {
        .list_voltage = regulator_list_voltage_table,
        .map_voltage = regulator_map_voltage_ascend,
        .set_voltage_sel = aat2870_ldo_set_voltage_sel,
index 441864b9fece038fc3c95d62b2b436f1b7a04537..43fda8b4455acd87d4c910d5ecfdb885759bba06 100644 (file)
@@ -69,7 +69,7 @@ static const struct regulator_linear_range act8945a_voltage_ranges[] = {
        REGULATOR_LINEAR_RANGE(2400000, 48, 63, 100000),
 };
 
-static struct regulator_ops act8945a_ops = {
+static const struct regulator_ops act8945a_ops = {
        .list_voltage           = regulator_list_voltage_linear_range,
        .map_voltage            = regulator_map_voltage_linear_range,
        .get_voltage_sel        = regulator_get_voltage_sel_regmap,
index 8b0f788a9bbb61afb97390097b83458681e6c94c..11c1f880b7bbffbf7f0446c76ac6d3a27616bfa5 100644 (file)
@@ -181,7 +181,7 @@ static int ad5398_disable(struct regulator_dev *rdev)
        return ret;
 }
 
-static struct regulator_ops ad5398_ops = {
+static const struct regulator_ops ad5398_ops = {
        .get_current_limit = ad5398_get_current_limit,
        .set_current_limit = ad5398_set_current_limit,
        .enable = ad5398_enable,
index 3a6d0290c54c0fbd0f1c82ffcd5329c2facc56ae..b041f277a38b7ba6634656785012ed5737ab03e6 100644 (file)
@@ -301,7 +301,19 @@ static int anatop_regulator_probe(struct platform_device *pdev)
                        return -EINVAL;
                }
        } else {
+               u32 enable_bit;
+
                rdesc->ops = &anatop_rops;
+
+               if (!of_property_read_u32(np, "anatop-enable-bit",
+                                         &enable_bit)) {
+                       anatop_rops.enable  = regulator_enable_regmap;
+                       anatop_rops.disable = regulator_disable_regmap;
+                       anatop_rops.is_enabled = regulator_is_enabled_regmap;
+
+                       rdesc->enable_reg = sreg->control_reg;
+                       rdesc->enable_mask = BIT(enable_bit);
+               }
        }
 
        /* register regulator */
index 302b57cb89c673a8d8036234641d2c9df5622a9d..e76d094591e72bd1813d4d43f776644c0e314b3f 100644 (file)
@@ -109,7 +109,7 @@ static int arizona_ldo1_hc_get_voltage_sel(struct regulator_dev *rdev)
        return (val & ARIZONA_LDO1_VSEL_MASK) >> ARIZONA_LDO1_VSEL_SHIFT;
 }
 
-static struct regulator_ops arizona_ldo1_hc_ops = {
+static const struct regulator_ops arizona_ldo1_hc_ops = {
        .list_voltage = arizona_ldo1_hc_list_voltage,
        .map_voltage = arizona_ldo1_hc_map_voltage,
        .get_voltage_sel = arizona_ldo1_hc_get_voltage_sel,
@@ -135,7 +135,7 @@ static const struct regulator_desc arizona_ldo1_hc = {
        .owner = THIS_MODULE,
 };
 
-static struct regulator_ops arizona_ldo1_ops = {
+static const struct regulator_ops arizona_ldo1_ops = {
        .list_voltage = regulator_list_voltage_linear,
        .map_voltage = regulator_map_voltage_linear,
        .get_voltage_sel = regulator_get_voltage_sel_regmap,
index fcb98dbda8379c76f79372a0266056275ef8815a..22bd714076229be6129d73436ac12611f4956f4a 100644 (file)
@@ -45,6 +45,7 @@ static void arizona_micsupp_check_cp(struct work_struct *work)
        struct arizona_micsupp *micsupp =
                container_of(work, struct arizona_micsupp, check_cp_work);
        struct snd_soc_dapm_context *dapm = micsupp->arizona->dapm;
+       struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
        struct arizona *arizona = micsupp->arizona;
        struct regmap *regmap = arizona->regmap;
        unsigned int reg;
@@ -59,9 +60,10 @@ static void arizona_micsupp_check_cp(struct work_struct *work)
        if (dapm) {
                if ((reg & (ARIZONA_CPMIC_ENA | ARIZONA_CPMIC_BYPASS)) ==
                    ARIZONA_CPMIC_ENA)
-                       snd_soc_dapm_force_enable_pin(dapm, "MICSUPP");
+                       snd_soc_component_force_enable_pin(component,
+                                                          "MICSUPP");
                else
-                       snd_soc_dapm_disable_pin(dapm, "MICSUPP");
+                       snd_soc_component_disable_pin(component, "MICSUPP");
 
                snd_soc_dapm_sync(dapm);
        }
@@ -104,7 +106,7 @@ static int arizona_micsupp_set_bypass(struct regulator_dev *rdev, bool ena)
        return ret;
 }
 
-static struct regulator_ops arizona_micsupp_ops = {
+static const struct regulator_ops arizona_micsupp_ops = {
        .enable = arizona_micsupp_enable,
        .disable = arizona_micsupp_disable,
        .is_enabled = regulator_is_enabled_regmap,
index c0e93b1332f7b87f577b46c4a4fe1c93af12b152..874d415d6b4f9a5878fab8394bd043de0fdfeab1 100644 (file)
@@ -82,7 +82,7 @@ static unsigned int as3711_get_mode_sd(struct regulator_dev *rdev)
        return -EINVAL;
 }
 
-static struct regulator_ops as3711_sd_ops = {
+static const struct regulator_ops as3711_sd_ops = {
        .is_enabled             = regulator_is_enabled_regmap,
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
@@ -94,7 +94,7 @@ static struct regulator_ops as3711_sd_ops = {
        .set_mode               = as3711_set_mode_sd,
 };
 
-static struct regulator_ops as3711_aldo_ops = {
+static const struct regulator_ops as3711_aldo_ops = {
        .is_enabled             = regulator_is_enabled_regmap,
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
@@ -104,7 +104,7 @@ static struct regulator_ops as3711_aldo_ops = {
        .map_voltage            = regulator_map_voltage_linear_range,
 };
 
-static struct regulator_ops as3711_dldo_ops = {
+static const struct regulator_ops as3711_dldo_ops = {
        .is_enabled             = regulator_is_enabled_regmap,
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
index e6a512ebeae2762812212ac5b9264f92bb8252be..0b9d4e3e52c7070f1532afa59d731da66c5893ef 100644 (file)
                .ops            = &axp20x_ops_range,                            \
        }
 
-static struct regulator_ops axp20x_ops_fixed = {
+static const struct regulator_ops axp20x_ops_fixed = {
        .list_voltage           = regulator_list_voltage_linear,
 };
 
-static struct regulator_ops axp20x_ops_range = {
+static const struct regulator_ops axp20x_ops_range = {
        .set_voltage_sel        = regulator_set_voltage_sel_regmap,
        .get_voltage_sel        = regulator_get_voltage_sel_regmap,
        .list_voltage           = regulator_list_voltage_linear_range,
@@ -141,7 +141,7 @@ static struct regulator_ops axp20x_ops_range = {
        .is_enabled             = regulator_is_enabled_regmap,
 };
 
-static struct regulator_ops axp20x_ops = {
+static const struct regulator_ops axp20x_ops = {
        .set_voltage_sel        = regulator_set_voltage_sel_regmap,
        .get_voltage_sel        = regulator_get_voltage_sel_regmap,
        .list_voltage           = regulator_list_voltage_linear,
@@ -150,7 +150,7 @@ static struct regulator_ops axp20x_ops = {
        .is_enabled             = regulator_is_enabled_regmap,
 };
 
-static struct regulator_ops axp20x_ops_sw = {
+static const struct regulator_ops axp20x_ops_sw = {
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
        .is_enabled             = regulator_is_enabled_regmap,
@@ -272,7 +272,7 @@ static const struct regulator_desc axp806_regulators[] = {
                        64, AXP806_DCDCD_V_CTRL, 0x3f, AXP806_PWR_OUT_CTRL1,
                        BIT(3)),
        AXP_DESC(AXP806, DCDCE, "dcdce", "vine", 1100, 3400, 100,
-                AXP806_DCDCB_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)),
+                AXP806_DCDCE_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)),
        AXP_DESC(AXP806, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
                 AXP806_ALDO1_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(5)),
        AXP_DESC(AXP806, ALDO2, "aldo2", "aldoin", 700, 3400, 100,
index 76b01835dcb44f51e59b8f4986b6fb9874a9a6c3..9dd715407b39471e29309a924393ef8b2ba6ee50 100644 (file)
@@ -250,7 +250,7 @@ static int bcm590xx_get_enable_register(int id)
        return reg;
 }
 
-static struct regulator_ops bcm590xx_ops_ldo = {
+static const struct regulator_ops bcm590xx_ops_ldo = {
        .is_enabled             = regulator_is_enabled_regmap,
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
@@ -260,7 +260,7 @@ static struct regulator_ops bcm590xx_ops_ldo = {
        .map_voltage            = regulator_map_voltage_iterate,
 };
 
-static struct regulator_ops bcm590xx_ops_dcdc = {
+static const struct regulator_ops bcm590xx_ops_dcdc = {
        .is_enabled             = regulator_is_enabled_regmap,
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
@@ -270,7 +270,7 @@ static struct regulator_ops bcm590xx_ops_dcdc = {
        .map_voltage            = regulator_map_voltage_linear_range,
 };
 
-static struct regulator_ops bcm590xx_ops_vbus = {
+static const struct regulator_ops bcm590xx_ops_vbus = {
        .is_enabled             = regulator_is_enabled_regmap,
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
index 04baac9a165bbb56da292a51d0a56055947861e0..53d4fc70dbd0981cdb73eabdc815346c25963b6b 100644 (file)
@@ -1455,12 +1455,14 @@ static struct regulator_dev *regulator_lookup_by_name(const char *name)
  * lookup could succeed in the future.
  *
  * If successful, returns a struct regulator_dev that corresponds to the name
- * @supply and with the embedded struct device refcount incremented by one,
- * or NULL on failure. The refcount must be dropped by calling put_device().
+ * @supply and with the embedded struct device refcount incremented by one.
+ * The refcount must be dropped by calling put_device().
+ * On failure one of the following ERR-PTR-encoded values is returned:
+ * -ENODEV if lookup fails permanently, -EPROBE_DEFER if lookup could succeed
+ * in the future.
  */
 static struct regulator_dev *regulator_dev_lookup(struct device *dev,
-                                                 const char *supply,
-                                                 int *ret)
+                                                 const char *supply)
 {
        struct regulator_dev *r;
        struct device_node *node;
@@ -1476,16 +1478,12 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev,
                        r = of_find_regulator_by_node(node);
                        if (r)
                                return r;
-                       *ret = -EPROBE_DEFER;
-                       return NULL;
-               } else {
+
                        /*
-                        * If we couldn't even get the node then it's
-                        * not just that the device didn't register
-                        * yet, there's no node and we'll never
-                        * succeed.
+                        * We have a node, but there is no device.
+                        * Assume it has not registered yet.
                         */
-                       *ret = -ENODEV;
+                       return ERR_PTR(-EPROBE_DEFER);
                }
        }
 
@@ -1506,13 +1504,16 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev,
 
                if (strcmp(map->supply, supply) == 0 &&
                    get_device(&map->regulator->dev)) {
-                       mutex_unlock(&regulator_list_mutex);
-                       return map->regulator;
+                       r = map->regulator;
+                       break;
                }
        }
        mutex_unlock(&regulator_list_mutex);
 
-       return NULL;
+       if (r)
+               return r;
+
+       return ERR_PTR(-ENODEV);
 }
 
 static int regulator_resolve_supply(struct regulator_dev *rdev)
@@ -1529,8 +1530,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
        if (rdev->supply)
                return 0;
 
-       r = regulator_dev_lookup(dev, rdev->supply_name, &ret);
-       if (!r) {
+       r = regulator_dev_lookup(dev, rdev->supply_name);
+       if (IS_ERR(r)) {
+               ret = PTR_ERR(r);
+
                if (ret == -ENODEV) {
                        /*
                         * No supply was specified for this regulator and
@@ -1553,6 +1556,19 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
                }
        }
 
+       /*
+        * If the supply's parent device is not the same as the
+        * regulator's parent device, then ensure the parent device
+        * is bound before we resolve the supply, in case the parent
+        * device get probe deferred and unregisters the supply.
+        */
+       if (r->dev.parent && r->dev.parent != rdev->dev.parent) {
+               if (!device_is_bound(r->dev.parent)) {
+                       put_device(&r->dev);
+                       return -EPROBE_DEFER;
+               }
+       }
+
        /* Recursively resolve the supply of the supply */
        ret = regulator_resolve_supply(r);
        if (ret < 0) {
@@ -1580,69 +1596,72 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
 }
 
 /* Internal regulator request function */
-static struct regulator *_regulator_get(struct device *dev, const char *id,
-                                       bool exclusive, bool allow_dummy)
+struct regulator *_regulator_get(struct device *dev, const char *id,
+                                enum regulator_get_type get_type)
 {
        struct regulator_dev *rdev;
-       struct regulator *regulator = ERR_PTR(-EPROBE_DEFER);
-       const char *devname = NULL;
+       struct regulator *regulator;
+       const char *devname = dev ? dev_name(dev) : "deviceless";
        int ret;
 
+       if (get_type >= MAX_GET_TYPE) {
+               dev_err(dev, "invalid type %d in %s\n", get_type, __func__);
+               return ERR_PTR(-EINVAL);
+       }
+
        if (id == NULL) {
                pr_err("get() with no identifier\n");
                return ERR_PTR(-EINVAL);
        }
 
-       if (dev)
-               devname = dev_name(dev);
+       rdev = regulator_dev_lookup(dev, id);
+       if (IS_ERR(rdev)) {
+               ret = PTR_ERR(rdev);
 
-       if (have_full_constraints())
-               ret = -ENODEV;
-       else
-               ret = -EPROBE_DEFER;
-
-       rdev = regulator_dev_lookup(dev, id, &ret);
-       if (rdev)
-               goto found;
-
-       regulator = ERR_PTR(ret);
+               /*
+                * If regulator_dev_lookup() fails with error other
+                * than -ENODEV our job here is done, we simply return it.
+                */
+               if (ret != -ENODEV)
+                       return ERR_PTR(ret);
 
-       /*
-        * If we have return value from dev_lookup fail, we do not expect to
-        * succeed, so, quit with appropriate error value
-        */
-       if (ret && ret != -ENODEV)
-               return regulator;
+               if (!have_full_constraints()) {
+                       dev_warn(dev,
+                                "incomplete constraints, dummy supplies not allowed\n");
+                       return ERR_PTR(-ENODEV);
+               }
 
-       if (!devname)
-               devname = "deviceless";
+               switch (get_type) {
+               case NORMAL_GET:
+                       /*
+                        * Assume that a regulator is physically present and
+                        * enabled, even if it isn't hooked up, and just
+                        * provide a dummy.
+                        */
+                       dev_warn(dev,
+                                "%s supply %s not found, using dummy regulator\n",
+                                devname, id);
+                       rdev = dummy_regulator_rdev;
+                       get_device(&rdev->dev);
+                       break;
 
-       /*
-        * Assume that a regulator is physically present and enabled
-        * even if it isn't hooked up and just provide a dummy.
-        */
-       if (have_full_constraints() && allow_dummy) {
-               pr_warn("%s supply %s not found, using dummy regulator\n",
-                       devname, id);
+               case EXCLUSIVE_GET:
+                       dev_warn(dev,
+                                "dummy supplies not allowed for exclusive requests\n");
+                       /* fall through */
 
-               rdev = dummy_regulator_rdev;
-               get_device(&rdev->dev);
-               goto found;
-       /* Don't log an error when called from regulator_get_optional() */
-       } else if (!have_full_constraints() || exclusive) {
-               dev_warn(dev, "dummy supplies not allowed\n");
+               default:
+                       return ERR_PTR(-ENODEV);
+               }
        }
 
-       return regulator;
-
-found:
        if (rdev->exclusive) {
                regulator = ERR_PTR(-EPERM);
                put_device(&rdev->dev);
                return regulator;
        }
 
-       if (exclusive && rdev->open_count) {
+       if (get_type == EXCLUSIVE_GET && rdev->open_count) {
                regulator = ERR_PTR(-EBUSY);
                put_device(&rdev->dev);
                return regulator;
@@ -1656,6 +1675,7 @@ found:
        }
 
        if (!try_module_get(rdev->owner)) {
+               regulator = ERR_PTR(-EPROBE_DEFER);
                put_device(&rdev->dev);
                return regulator;
        }
@@ -1669,7 +1689,7 @@ found:
        }
 
        rdev->open_count++;
-       if (exclusive) {
+       if (get_type == EXCLUSIVE_GET) {
                rdev->exclusive = 1;
 
                ret = _regulator_is_enabled(rdev);
@@ -1697,7 +1717,7 @@ found:
  */
 struct regulator *regulator_get(struct device *dev, const char *id)
 {
-       return _regulator_get(dev, id, false, true);
+       return _regulator_get(dev, id, NORMAL_GET);
 }
 EXPORT_SYMBOL_GPL(regulator_get);
 
@@ -1724,7 +1744,7 @@ EXPORT_SYMBOL_GPL(regulator_get);
  */
 struct regulator *regulator_get_exclusive(struct device *dev, const char *id)
 {
-       return _regulator_get(dev, id, true, false);
+       return _regulator_get(dev, id, EXCLUSIVE_GET);
 }
 EXPORT_SYMBOL_GPL(regulator_get_exclusive);
 
@@ -1750,7 +1770,7 @@ EXPORT_SYMBOL_GPL(regulator_get_exclusive);
  */
 struct regulator *regulator_get_optional(struct device *dev, const char *id)
 {
-       return _regulator_get(dev, id, false, false);
+       return _regulator_get(dev, id, OPTIONAL_GET);
 }
 EXPORT_SYMBOL_GPL(regulator_get_optional);
 
@@ -3660,7 +3680,7 @@ err:
        for (++i; i < num_consumers; ++i) {
                r = regulator_enable(consumers[i].consumer);
                if (r != 0)
-                       pr_err("Failed to reename %s: %d\n",
+                       pr_err("Failed to re-enable %s: %d\n",
                               consumers[i].supply, r);
        }
 
@@ -3686,21 +3706,17 @@ int regulator_bulk_force_disable(int num_consumers,
                           struct regulator_bulk_data *consumers)
 {
        int i;
-       int ret;
+       int ret = 0;
 
-       for (i = 0; i < num_consumers; i++)
+       for (i = 0; i < num_consumers; i++) {
                consumers[i].ret =
                            regulator_force_disable(consumers[i].consumer);
 
-       for (i = 0; i < num_consumers; i++) {
-               if (consumers[i].ret != 0) {
+               /* Store first error for reporting */
+               if (consumers[i].ret && !ret)
                        ret = consumers[i].ret;
-                       goto out;
-               }
        }
 
-       return 0;
-out:
        return ret;
 }
 EXPORT_SYMBOL_GPL(regulator_bulk_force_disable);
@@ -4391,12 +4407,13 @@ static void regulator_summary_show_subtree(struct seq_file *s,
        seq_puts(s, "\n");
 
        list_for_each_entry(consumer, &rdev->consumer_list, list) {
-               if (consumer->dev->class == &regulator_class)
+               if (consumer->dev && consumer->dev->class == &regulator_class)
                        continue;
 
                seq_printf(s, "%*s%-*s ",
                           (level + 1) * 3 + 1, "",
-                          30 - (level + 1) * 3, dev_name(consumer->dev));
+                          30 - (level + 1) * 3,
+                          consumer->dev ? dev_name(consumer->dev) : "deviceless");
 
                switch (rdev->desc->type) {
                case REGULATOR_VOLTAGE:
@@ -4540,6 +4557,16 @@ static int __init regulator_init_complete(void)
        if (of_have_populated_dt())
                has_full_constraints = true;
 
+       /*
+        * Regulators may have failed to resolve their input supplies
+        * when they were registered, either because the input supply was
+        * not registered yet or because its parent device was not
+        * bound yet. So attempt to resolve the input supplies for
+        * pending regulators before trying to disable unused ones.
+        */
+       class_for_each_device(&regulator_class, NULL, NULL,
+                             regulator_register_resolve_supply);
+
        /* If we have a full configuration then disable any regulators
         * we have permission to change the status for and which are
         * not in use or always_on.  This is effectively the default
diff --git a/drivers/regulator/cpcap-regulator.c b/drivers/regulator/cpcap-regulator.c
new file mode 100644 (file)
index 0000000..cc98ace
--- /dev/null
@@ -0,0 +1,464 @@
+/*
+ * Motorola CPCAP PMIC regulator driver
+ *
+ * Based on cpcap-regulator.c from Motorola Linux kernel tree
+ * Copyright (C) 2009-2011 Motorola, Inc.
+ *
+ * Rewritten for mainline kernel to use device tree and regmap
+ * Copyright (C) 2017 Tony Lindgren <tony@atomide.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/mfd/motorola-cpcap.h>
+
+/*
+ * Resource assignment register bits. These seem to control the state
+ * idle modes and are used at least for omap4.
+ */
+
+/* CPCAP_REG_ASSIGN2 bits - Resource Assignment 2 */
+#define CPCAP_BIT_VSDIO_SEL            BIT(15)
+#define CPCAP_BIT_VDIG_SEL             BIT(14)
+#define CPCAP_BIT_VCAM_SEL             BIT(13)
+#define CPCAP_BIT_SW6_SEL              BIT(12)
+#define CPCAP_BIT_SW5_SEL              BIT(11)
+#define CPCAP_BIT_SW4_SEL              BIT(10)
+#define CPCAP_BIT_SW3_SEL              BIT(9)
+#define CPCAP_BIT_SW2_SEL              BIT(8)
+#define CPCAP_BIT_SW1_SEL              BIT(7)
+
+/* CPCAP_REG_ASSIGN3 bits - Resource Assignment 3 */
+#define CPCAP_BIT_VUSBINT2_SEL         BIT(15)
+#define CPCAP_BIT_VUSBINT1_SEL         BIT(14)
+#define CPCAP_BIT_VVIB_SEL             BIT(13)
+#define CPCAP_BIT_VWLAN1_SEL           BIT(12)
+#define CPCAP_BIT_VRF1_SEL             BIT(11)
+#define CPCAP_BIT_VHVIO_SEL            BIT(10)
+#define CPCAP_BIT_VDAC_SEL             BIT(9)
+#define CPCAP_BIT_VUSB_SEL             BIT(8)
+#define CPCAP_BIT_VSIM_SEL             BIT(7)
+#define CPCAP_BIT_VRFREF_SEL           BIT(6)
+#define CPCAP_BIT_VPLL_SEL             BIT(5)
+#define CPCAP_BIT_VFUSE_SEL            BIT(4)
+#define CPCAP_BIT_VCSI_SEL             BIT(3)
+#define CPCAP_BIT_SPARE_14_2           BIT(2)
+#define CPCAP_BIT_VWLAN2_SEL           BIT(1)
+#define CPCAP_BIT_VRF2_SEL             BIT(0)
+
+/* CPCAP_REG_ASSIGN4 bits - Resource Assignment 4 */
+#define CPCAP_BIT_VAUDIO_SEL           BIT(0)
+
+/*
+ * Enable register bits. At least CPCAP_BIT_AUDIO_LOW_PWR is generic,
+ * and not limited to audio regulator. Let's use the Motorola kernel
+ * naming for now until we have a better understanding of the other
+ * enable register bits. No idea why BIT(3) is not defined.
+ */
+#define CPCAP_BIT_AUDIO_LOW_PWR                BIT(6)
+#define CPCAP_BIT_AUD_LOWPWR_SPEED     BIT(5)
+#define CPCAP_BIT_VAUDIOPRISTBY                BIT(4)
+#define CPCAP_BIT_VAUDIO_MODE1         BIT(2)
+#define CPCAP_BIT_VAUDIO_MODE0         BIT(1)
+#define CPCAP_BIT_V_AUDIO_EN           BIT(0)
+
+/*
+ * Off mode configuration bit. Used currently only by SW5 on omap4. There's
+ * the following comment in Motorola Linux kernel tree for it:
+ *
+ * When set in the regulator mode, the regulator assignment will be changed
+ * to secondary when the regulator is disabled. The mode will be set back to
+ * primary when the regulator is turned on.
+ */
+#define CPCAP_REG_OFF_MODE_SEC         BIT(15)
+
+/**
+ * SoC specific configuration for CPCAP regulator. There are at least three
+ * different SoCs each with their own parameters: omap3, omap4 and tegra2.
+ *
+ * The assign_reg and assign_mask seem to allow toggling between primary
+ * and secondary mode that at least omap4 uses for off mode.
+ */
+struct cpcap_regulator {
+       struct regulator_desc rdesc;
+       const u16 assign_reg;
+       const u16 assign_mask;
+       const u16 vsel_shift;
+};
+
+#define CPCAP_REG(_ID, reg, assignment_reg, assignment_mask, val_tbl,  \
+               mode_mask, volt_mask, volt_shft,                        \
+               mode_val, off_val, volt_trans_time) {                   \
+       .rdesc = {                                                      \
+               .name = #_ID,                                           \
+               .of_match = of_match_ptr(#_ID),                         \
+               .ops = &cpcap_regulator_ops,                            \
+               .regulators_node = of_match_ptr("regulators"),          \
+               .type = REGULATOR_VOLTAGE,                              \
+               .id = CPCAP_##_ID,                                      \
+               .owner = THIS_MODULE,                                   \
+               .n_voltages = ARRAY_SIZE(val_tbl),                      \
+               .volt_table = (val_tbl),                                \
+               .vsel_reg = (reg),                                      \
+               .vsel_mask = (volt_mask),                               \
+               .enable_reg = (reg),                                    \
+               .enable_mask = (mode_mask),                             \
+               .enable_val = (mode_val),                               \
+               .disable_val = (off_val),                               \
+               .ramp_delay = (volt_trans_time),                        \
+       },                                                              \
+       .assign_reg = (assignment_reg),                                 \
+       .assign_mask = (assignment_mask),                               \
+       .vsel_shift = (volt_shft),                                      \
+}
+
+struct cpcap_ddata {
+       struct regmap *reg;
+       struct device *dev;
+       const struct cpcap_regulator *soc;
+};
+
+enum cpcap_regulator_id {
+       CPCAP_SW1,
+       CPCAP_SW2,
+       CPCAP_SW3,
+       CPCAP_SW4,
+       CPCAP_SW5,
+       CPCAP_SW6,
+       CPCAP_VCAM,
+       CPCAP_VCSI,
+       CPCAP_VDAC,
+       CPCAP_VDIG,
+       CPCAP_VFUSE,
+       CPCAP_VHVIO,
+       CPCAP_VSDIO,
+       CPCAP_VPLL,
+       CPCAP_VRF1,
+       CPCAP_VRF2,
+       CPCAP_VRFREF,
+       CPCAP_VWLAN1,
+       CPCAP_VWLAN2,
+       CPCAP_VSIM,
+       CPCAP_VSIMCARD,
+       CPCAP_VVIB,
+       CPCAP_VUSB,
+       CPCAP_VAUDIO,
+       CPCAP_NR_REGULATORS,
+};
+
+/*
+ * We need to also configure regulator idle mode for SoC off mode if
+ * CPCAP_REG_OFF_MODE_SEC is set.
+ */
+static int cpcap_regulator_enable(struct regulator_dev *rdev)
+{
+       struct cpcap_regulator *regulator = rdev_get_drvdata(rdev);
+       int error, ignore;
+
+       error = regulator_enable_regmap(rdev);
+       if (error)
+               return error;
+
+       if (rdev->desc->enable_val & CPCAP_REG_OFF_MODE_SEC) {
+               error = regmap_update_bits(rdev->regmap, regulator->assign_reg,
+                                          regulator->assign_mask,
+                                          regulator->assign_mask);
+               if (error)
+                       ignore = regulator_disable_regmap(rdev);
+       }
+
+       return error;
+}
+
+/*
+ * We need to also configure regulator idle mode for SoC off mode if
+ * CPCAP_REG_OFF_MODE_SEC is set.
+ */
+static int cpcap_regulator_disable(struct regulator_dev *rdev)
+{
+       struct cpcap_regulator *regulator = rdev_get_drvdata(rdev);
+       int error, ignore;
+
+       if (rdev->desc->enable_val & CPCAP_REG_OFF_MODE_SEC) {
+               error = regmap_update_bits(rdev->regmap, regulator->assign_reg,
+                                          regulator->assign_mask, 0);
+               if (error)
+                       return error;
+       }
+
+       error = regulator_disable_regmap(rdev);
+       if (error && (rdev->desc->enable_val & CPCAP_REG_OFF_MODE_SEC)) {
+               ignore = regmap_update_bits(rdev->regmap, regulator->assign_reg,
+                                           regulator->assign_mask,
+                                           regulator->assign_mask);
+       }
+
+       return error;
+}
+
+static unsigned int cpcap_regulator_get_mode(struct regulator_dev *rdev)
+{
+       int value;
+
+       regmap_read(rdev->regmap, rdev->desc->enable_reg, &value);
+
+       if (!(value & CPCAP_BIT_AUDIO_LOW_PWR))
+               return REGULATOR_MODE_STANDBY;
+
+       return REGULATOR_MODE_NORMAL;
+}
+
+static int cpcap_regulator_set_mode(struct regulator_dev *rdev,
+                                   unsigned int mode)
+{
+       int value;
+
+       switch (mode) {
+       case REGULATOR_MODE_NORMAL:
+               value = CPCAP_BIT_AUDIO_LOW_PWR;
+               break;
+       case REGULATOR_MODE_STANDBY:
+               value = 0;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+                                 CPCAP_BIT_AUDIO_LOW_PWR, value);
+}
+
+static struct regulator_ops cpcap_regulator_ops = {
+       .enable = cpcap_regulator_enable,
+       .disable = cpcap_regulator_disable,
+       .is_enabled = regulator_is_enabled_regmap,
+       .list_voltage = regulator_list_voltage_table,
+       .map_voltage = regulator_map_voltage_iterate,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_mode = cpcap_regulator_get_mode,
+       .set_mode = cpcap_regulator_set_mode,
+};
+
+static const unsigned int unknown_val_tbl[] = { 0, };
+static const unsigned int sw5_val_tbl[] = { 0, 5050000, };
+static const unsigned int vcam_val_tbl[] = { 2600000, 2700000, 2800000,
+                                            2900000, };
+static const unsigned int vcsi_val_tbl[] = { 1200000, 1800000, };
+static const unsigned int vdac_val_tbl[] = { 1200000, 1500000, 1800000,
+                                            2500000,};
+static const unsigned int vdig_val_tbl[] = { 1200000, 1350000, 1500000,
+                                            1875000, };
+static const unsigned int vfuse_val_tbl[] = { 1500000, 1600000, 1700000,
+                                             1800000, 1900000, 2000000,
+                                             2100000, 2200000, 2300000,
+                                             2400000, 2500000, 2600000,
+                                             2700000, 3150000, };
+static const unsigned int vhvio_val_tbl[] = { 2775000, };
+static const unsigned int vsdio_val_tbl[] = { 1500000, 1600000, 1800000,
+                                             2600000, 2700000, 2800000,
+                                             2900000, 3000000, };
+static const unsigned int vpll_val_tbl[] = { 1200000, 1300000, 1400000,
+                                            1800000, };
+/* Quirk: 2775000 is before 2500000 for vrf1 regulator */
+static const unsigned int vrf1_val_tbl[] = { 2775000, 2500000, };
+static const unsigned int vrf2_val_tbl[] = { 0, 2775000, };
+static const unsigned int vrfref_val_tbl[] = { 2500000, 2775000, };
+static const unsigned int vwlan1_val_tbl[] = { 1800000, 1900000, };
+static const unsigned int vwlan2_val_tbl[] = { 2775000, 3000000, 3300000,
+                                              3300000, };
+static const unsigned int vsim_val_tbl[] = { 1800000, 2900000, };
+static const unsigned int vsimcard_val_tbl[] = { 1800000, 2900000, };
+static const unsigned int vvib_val_tbl[] = { 1300000, 1800000, 2000000,
+                                            3000000, };
+static const unsigned int vusb_val_tbl[] = { 0, 3300000, };
+static const unsigned int vaudio_val_tbl[] = { 0, 2775000, };
+
+/**
+ * SoC specific configuration for omap4. The data below comes from Motorola
+ * Linux kernel tree. It's basically the values of cpcap_regltr_data,
+ * cpcap_regulator_mode_values and cpcap_regulator_off_mode_values, see
+ * CPCAP_REG macro above.
+ *
+ * SW1 to SW4 and SW6 seems to be unused for mapphone. Note that VSIM and
+ * VSIMCARD have a shared resource assignment bit.
+ */
+static struct cpcap_regulator omap4_regulators[] = {
+       CPCAP_REG(SW1, CPCAP_REG_S1C1, CPCAP_REG_ASSIGN2,
+                 CPCAP_BIT_SW1_SEL, unknown_val_tbl,
+                 0, 0, 0, 0, 0, 0),
+       CPCAP_REG(SW2, CPCAP_REG_S2C1, CPCAP_REG_ASSIGN2,
+                 CPCAP_BIT_SW2_SEL, unknown_val_tbl,
+                 0, 0, 0, 0, 0, 0),
+       CPCAP_REG(SW3, CPCAP_REG_S3C, CPCAP_REG_ASSIGN2,
+                 CPCAP_BIT_SW3_SEL, unknown_val_tbl,
+                 0, 0, 0, 0, 0, 0),
+       CPCAP_REG(SW4, CPCAP_REG_S4C1, CPCAP_REG_ASSIGN2,
+                 CPCAP_BIT_SW4_SEL, unknown_val_tbl,
+                 0, 0, 0, 0, 0, 0),
+       CPCAP_REG(SW5, CPCAP_REG_S5C, CPCAP_REG_ASSIGN2,
+                 CPCAP_BIT_SW5_SEL, sw5_val_tbl,
+                 0x28, 0, 0, 0x20 | CPCAP_REG_OFF_MODE_SEC, 0, 0),
+       CPCAP_REG(SW6, CPCAP_REG_S6C, CPCAP_REG_ASSIGN2,
+                 CPCAP_BIT_SW6_SEL, unknown_val_tbl,
+                 0, 0, 0, 0, 0, 0),
+       CPCAP_REG(VCAM, CPCAP_REG_VCAMC, CPCAP_REG_ASSIGN2,
+                 CPCAP_BIT_VCAM_SEL, vcam_val_tbl,
+                 0x87, 0x30, 4, 0x3, 0, 420),
+       CPCAP_REG(VCSI, CPCAP_REG_VCSIC, CPCAP_REG_ASSIGN3,
+                 CPCAP_BIT_VCSI_SEL, vcsi_val_tbl,
+                 0x47, 0x10, 4, 0x43, 0x41, 350),
+       CPCAP_REG(VDAC, CPCAP_REG_VDACC, CPCAP_REG_ASSIGN3,
+                 CPCAP_BIT_VDAC_SEL, vdac_val_tbl,
+                 0x87, 0x30, 4, 0x3, 0, 420),
+       CPCAP_REG(VDIG, CPCAP_REG_VDIGC, CPCAP_REG_ASSIGN2,
+                 CPCAP_BIT_VDIG_SEL, vdig_val_tbl,
+                 0x87, 0x30, 4, 0x82, 0, 420),
+       CPCAP_REG(VFUSE, CPCAP_REG_VFUSEC, CPCAP_REG_ASSIGN3,
+                 CPCAP_BIT_VFUSE_SEL, vfuse_val_tbl,
+                 0x80, 0xf, 0, 0x80, 0, 420),
+       CPCAP_REG(VHVIO, CPCAP_REG_VHVIOC, CPCAP_REG_ASSIGN3,
+                 CPCAP_BIT_VHVIO_SEL, vhvio_val_tbl,
+                 0x17, 0, 0, 0, 0x12, 0),
+       CPCAP_REG(VSDIO, CPCAP_REG_VSDIOC, CPCAP_REG_ASSIGN2,
+                 CPCAP_BIT_VSDIO_SEL, vsdio_val_tbl,
+                 0x87, 0x38, 3, 0x82, 0, 420),
+       CPCAP_REG(VPLL, CPCAP_REG_VPLLC, CPCAP_REG_ASSIGN3,
+                 CPCAP_BIT_VPLL_SEL, vpll_val_tbl,
+                 0x43, 0x18, 3, 0x2, 0, 420),
+       CPCAP_REG(VRF1, CPCAP_REG_VRF1C, CPCAP_REG_ASSIGN3,
+                 CPCAP_BIT_VRF1_SEL, vrf1_val_tbl,
+                 0xac, 0x2, 1, 0x4, 0, 10),
+       CPCAP_REG(VRF2, CPCAP_REG_VRF2C, CPCAP_REG_ASSIGN3,
+                 CPCAP_BIT_VRF2_SEL, vrf2_val_tbl,
+                 0x23, 0x8, 3, 0, 0, 10),
+       CPCAP_REG(VRFREF, CPCAP_REG_VRFREFC, CPCAP_REG_ASSIGN3,
+                 CPCAP_BIT_VRFREF_SEL, vrfref_val_tbl,
+                 0x23, 0x8, 3, 0, 0, 420),
+       CPCAP_REG(VWLAN1, CPCAP_REG_VWLAN1C, CPCAP_REG_ASSIGN3,
+                 CPCAP_BIT_VWLAN1_SEL, vwlan1_val_tbl,
+                 0x47, 0x10, 4, 0, 0, 420),
+       CPCAP_REG(VWLAN2, CPCAP_REG_VWLAN2C, CPCAP_REG_ASSIGN3,
+                 CPCAP_BIT_VWLAN2_SEL, vwlan2_val_tbl,
+                 0x20c, 0xc0, 6, 0x20c, 0, 420),
+       CPCAP_REG(VSIM, CPCAP_REG_VSIMC, CPCAP_REG_ASSIGN3,
+                 0xffff, vsim_val_tbl,
+                 0x23, 0x8, 3, 0x3, 0, 420),
+       CPCAP_REG(VSIMCARD, CPCAP_REG_VSIMC, CPCAP_REG_ASSIGN3,
+                 0xffff, vsimcard_val_tbl,
+                 0x1e80, 0x8, 3, 0x1e00, 0, 420),
+       CPCAP_REG(VVIB, CPCAP_REG_VVIBC, CPCAP_REG_ASSIGN3,
+                 CPCAP_BIT_VVIB_SEL, vvib_val_tbl,
+                 0x1, 0xc, 2, 0x1, 0, 500),
+       CPCAP_REG(VUSB, CPCAP_REG_VUSBC, CPCAP_REG_ASSIGN3,
+                 CPCAP_BIT_VUSB_SEL, vusb_val_tbl,
+                 0x11c, 0x40, 6, 0xc, 0, 0),
+       CPCAP_REG(VAUDIO, CPCAP_REG_VAUDIOC, CPCAP_REG_ASSIGN4,
+                 CPCAP_BIT_VAUDIO_SEL, vaudio_val_tbl,
+                 0x16, 0x1, 0, 0x4, 0, 0),
+       { /* sentinel */ },
+};
+
+static const struct of_device_id cpcap_regulator_id_table[] = {
+       {
+               .compatible = "motorola,cpcap-regulator",
+       },
+       {
+               .compatible = "motorola,mapphone-cpcap-regulator",
+               .data = omap4_regulators,
+       },
+       {},
+};
+MODULE_DEVICE_TABLE(of, cpcap_regulator_id_table);
+
+static int cpcap_regulator_probe(struct platform_device *pdev)
+{
+       struct cpcap_ddata *ddata;
+       const struct of_device_id *match;
+       struct regulator_config config;
+       struct regulator_init_data init_data;
+       int i;
+
+       match = of_match_device(of_match_ptr(cpcap_regulator_id_table),
+                               &pdev->dev);
+       if (!match)
+               return -EINVAL;
+
+       if (!match->data) {
+               dev_err(&pdev->dev, "no configuration data found\n");
+
+               return -ENODEV;
+       }
+
+       ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+       if (!ddata)
+               return -ENOMEM;
+
+       ddata->reg = dev_get_regmap(pdev->dev.parent, NULL);
+       if (!ddata->reg)
+               return -ENODEV;
+
+       ddata->dev = &pdev->dev;
+       ddata->soc = match->data;
+       platform_set_drvdata(pdev, ddata);
+
+       memset(&config, 0, sizeof(config));
+       memset(&init_data, 0, sizeof(init_data));
+       config.dev = &pdev->dev;
+       config.regmap = ddata->reg;
+       config.init_data = &init_data;
+
+       for (i = 0; i < CPCAP_NR_REGULATORS; i++) {
+               const struct cpcap_regulator *regulator = &ddata->soc[i];
+               struct regulator_dev *rdev;
+
+               if (!regulator->rdesc.name)
+                       break;
+
+               if (regulator->rdesc.volt_table == unknown_val_tbl)
+                       continue;
+
+               config.driver_data = (void *)regulator;
+               rdev = devm_regulator_register(&pdev->dev,
+                                              &regulator->rdesc,
+                                              &config);
+               if (IS_ERR(rdev)) {
+                       dev_err(&pdev->dev, "failed to register regulator %s\n",
+                               regulator->rdesc.name);
+
+                       return PTR_ERR(rdev);
+               }
+       }
+
+       return 0;
+}
+
+static struct platform_driver cpcap_regulator_driver = {
+       .probe          = cpcap_regulator_probe,
+       .driver         = {
+               .name   = "cpcap-regulator",
+               .of_match_table = of_match_ptr(cpcap_regulator_id_table),
+       },
+};
+
+module_platform_driver(cpcap_regulator_driver);
+
+MODULE_ALIAS("platform:cpcap-regulator");
+MODULE_AUTHOR("Tony Lindgren <tony@atomide.com>");
+MODULE_DESCRIPTION("CPCAP regulator driver");
+MODULE_LICENSE("GPL v2");
index 6ec1d400adae7e444102266385171c5d787110a3..784e3bf32210bb74a34934078dc6218a82e2aa6e 100644 (file)
 
 #include "internal.h"
 
-enum {
-       NORMAL_GET,
-       EXCLUSIVE_GET,
-       OPTIONAL_GET,
-};
-
 static void devm_regulator_release(struct device *dev, void *res)
 {
        regulator_put(*(struct regulator **)res);
@@ -39,20 +33,7 @@ static struct regulator *_devm_regulator_get(struct device *dev, const char *id,
        if (!ptr)
                return ERR_PTR(-ENOMEM);
 
-       switch (get_type) {
-       case NORMAL_GET:
-               regulator = regulator_get(dev, id);
-               break;
-       case EXCLUSIVE_GET:
-               regulator = regulator_get_exclusive(dev, id);
-               break;
-       case OPTIONAL_GET:
-               regulator = regulator_get_optional(dev, id);
-               break;
-       default:
-               regulator = ERR_PTR(-EINVAL);
-       }
-
+       regulator = _regulator_get(dev, id, get_type);
        if (!IS_ERR(regulator)) {
                *ptr = regulator;
                devres_add(dev, ptr);
@@ -139,6 +120,18 @@ void devm_regulator_put(struct regulator *regulator)
 }
 EXPORT_SYMBOL_GPL(devm_regulator_put);
 
+struct regulator_bulk_devres {
+       struct regulator_bulk_data *consumers;
+       int num_consumers;
+};
+
+static void devm_regulator_bulk_release(struct device *dev, void *res)
+{
+       struct regulator_bulk_devres *devres = res;
+
+       regulator_bulk_free(devres->num_consumers, devres->consumers);
+}
+
 /**
  * devm_regulator_bulk_get - managed get multiple regulator consumers
  *
@@ -157,29 +150,22 @@ EXPORT_SYMBOL_GPL(devm_regulator_put);
 int devm_regulator_bulk_get(struct device *dev, int num_consumers,
                            struct regulator_bulk_data *consumers)
 {
-       int i;
+       struct regulator_bulk_devres *devres;
        int ret;
 
-       for (i = 0; i < num_consumers; i++)
-               consumers[i].consumer = NULL;
-
-       for (i = 0; i < num_consumers; i++) {
-               consumers[i].consumer = devm_regulator_get(dev,
-                                                          consumers[i].supply);
-               if (IS_ERR(consumers[i].consumer)) {
-                       ret = PTR_ERR(consumers[i].consumer);
-                       dev_err(dev, "Failed to get supply '%s': %d\n",
-                               consumers[i].supply, ret);
-                       consumers[i].consumer = NULL;
-                       goto err;
-               }
-       }
-
-       return 0;
+       devres = devres_alloc(devm_regulator_bulk_release,
+                             sizeof(*devres), GFP_KERNEL);
+       if (!devres)
+               return -ENOMEM;
 
-err:
-       for (i = 0; i < num_consumers && consumers[i].consumer; i++)
-               devm_regulator_put(consumers[i].consumer);
+       ret = regulator_bulk_get(dev, num_consumers, consumers);
+       if (!ret) {
+               devres->consumers = consumers;
+               devres->num_consumers = num_consumers;
+               devres_add(dev, devres);
+       } else {
+               devres_free(devres);
+       }
 
        return ret;
 }
index d7da81a875cf612bf367d08f174f66f18aed4341..60f4318315826765d6f52bc4ff4e863dc3873ebc 100644 (file)
@@ -202,7 +202,7 @@ static int fan53555_set_ramp(struct regulator_dev *rdev, int ramp)
                                  CTL_SLEW_MASK, regval << CTL_SLEW_SHIFT);
 }
 
-static struct regulator_ops fan53555_regulator_ops = {
+static const struct regulator_ops fan53555_regulator_ops = {
        .set_voltage_sel = regulator_set_voltage_sel_regmap,
        .get_voltage_sel = regulator_get_voltage_sel_regmap,
        .set_voltage_time_sel = regulator_set_voltage_time_sel,
index a43b0e8a438d305a959d3d745c7c65bf796ca9f9..988a7472c2ab568c3d1c03d1092c0713073a6d28 100644 (file)
@@ -30,9 +30,6 @@
 #include <linux/of_gpio.h>
 #include <linux/regulator/of_regulator.h>
 #include <linux/regulator/machine.h>
-#include <linux/acpi.h>
-#include <linux/property.h>
-#include <linux/gpio/consumer.h>
 
 struct fixed_voltage_data {
        struct regulator_desc desc;
@@ -97,44 +94,6 @@ of_get_fixed_voltage_config(struct device *dev,
        return config;
 }
 
-/**
- * acpi_get_fixed_voltage_config - extract fixed_voltage_config structure info
- * @dev: device requesting for fixed_voltage_config
- * @desc: regulator description
- *
- * Populates fixed_voltage_config structure by extracting data through ACPI
- * interface, returns a pointer to the populated structure of NULL if memory
- * alloc fails.
- */
-static struct fixed_voltage_config *
-acpi_get_fixed_voltage_config(struct device *dev,
-                             const struct regulator_desc *desc)
-{
-       struct fixed_voltage_config *config;
-       const char *supply_name;
-       struct gpio_desc *gpiod;
-       int ret;
-
-       config = devm_kzalloc(dev, sizeof(*config), GFP_KERNEL);
-       if (!config)
-               return ERR_PTR(-ENOMEM);
-
-       ret = device_property_read_string(dev, "supply-name", &supply_name);
-       if (!ret)
-               config->supply_name = supply_name;
-
-       gpiod = gpiod_get(dev, "gpio", GPIOD_ASIS);
-       if (IS_ERR(gpiod))
-               return ERR_PTR(-ENODEV);
-
-       config->gpio = desc_to_gpio(gpiod);
-       config->enable_high = device_property_read_bool(dev,
-                                                       "enable-active-high");
-       gpiod_put(gpiod);
-
-       return config;
-}
-
 static struct regulator_ops fixed_voltage_ops = {
 };
 
@@ -155,11 +114,6 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
                                                     &drvdata->desc);
                if (IS_ERR(config))
                        return PTR_ERR(config);
-       } else if (ACPI_HANDLE(&pdev->dev)) {
-               config = acpi_get_fixed_voltage_config(&pdev->dev,
-                                                      &drvdata->desc);
-               if (IS_ERR(config))
-                       return PTR_ERR(config);
        } else {
                config = dev_get_platdata(&pdev->dev);
        }
index aca18466f5220a68fc989e201e01ce42639a35db..065c100e9a03592f53a4b981fe71899a1b236cba 100644 (file)
@@ -96,7 +96,7 @@ static int hi655x_disable(struct regulator_dev *rdev)
        return ret;
 }
 
-static struct regulator_ops hi655x_regulator_ops = {
+static const struct regulator_ops hi655x_regulator_ops = {
        .enable = regulator_enable_regmap,
        .disable = hi655x_disable,
        .is_enabled = hi655x_is_enabled,
@@ -105,7 +105,7 @@ static struct regulator_ops hi655x_regulator_ops = {
        .set_voltage_sel = regulator_set_voltage_sel_regmap,
 };
 
-static struct regulator_ops hi655x_ldo_linear_ops = {
+static const struct regulator_ops hi655x_ldo_linear_ops = {
        .enable = regulator_enable_regmap,
        .disable = hi655x_disable,
        .is_enabled = hi655x_is_enabled,
index c74ac873402370b5057464b22a694672cccf4d73..1dd575b285649b04b81a0d109b09f77bd7fd01a6 100644 (file)
@@ -51,4 +51,14 @@ regulator_of_get_init_data(struct device *dev,
 }
 #endif
 
+enum regulator_get_type {
+       NORMAL_GET,
+       EXCLUSIVE_GET,
+       OPTIONAL_GET,
+       MAX_GET_TYPE
+};
+
+struct regulator *_regulator_get(struct device *dev, const char *id,
+                                enum regulator_get_type get_type);
+
 #endif
index d6773da925bad00f3bc227294255f8786c2eaf9b..db34e1da75ef8a347b88aba9278911d0e054d288 100644 (file)
@@ -227,7 +227,7 @@ err_i2c:
        return ret;
 }
 
-static struct regulator_ops lp8755_buck_ops = {
+static const struct regulator_ops lp8755_buck_ops = {
        .map_voltage = regulator_map_voltage_linear,
        .list_voltage = regulator_list_voltage_linear,
        .set_voltage_sel = regulator_set_voltage_sel_regmap,
index 47bef328fb58b356c061f2a7d408da9e8a971be7..a7a1a0313bbfc68625080012e681f50916fe947f 100644 (file)
@@ -161,7 +161,7 @@ static int ltc3589_set_suspend_mode(struct regulator_dev *rdev,
 }
 
 /* SW1, SW2, SW3, LDO2 */
-static struct regulator_ops ltc3589_linear_regulator_ops = {
+static const struct regulator_ops ltc3589_linear_regulator_ops = {
        .enable = regulator_enable_regmap,
        .disable = regulator_disable_regmap,
        .is_enabled = regulator_is_enabled_regmap,
@@ -175,18 +175,18 @@ static struct regulator_ops ltc3589_linear_regulator_ops = {
 };
 
 /* BB_OUT, LDO3 */
-static struct regulator_ops ltc3589_fixed_regulator_ops = {
+static const struct regulator_ops ltc3589_fixed_regulator_ops = {
        .enable = regulator_enable_regmap,
        .disable = regulator_disable_regmap,
        .is_enabled = regulator_is_enabled_regmap,
 };
 
 /* LDO1 */
-static struct regulator_ops ltc3589_fixed_standby_regulator_ops = {
+static const struct regulator_ops ltc3589_fixed_standby_regulator_ops = {
 };
 
 /* LDO4 */
-static struct regulator_ops ltc3589_table_regulator_ops = {
+static const struct regulator_ops ltc3589_table_regulator_ops = {
        .enable = regulator_enable_regmap,
        .disable = regulator_disable_regmap,
        .is_enabled = regulator_is_enabled_regmap,
index e2b476ca2b4d67ca828a7e3099ae1142266564b7..503cd90eba393439114a05c0c8d5e788216d7653 100644 (file)
@@ -161,7 +161,7 @@ static int ltc3676_of_parse_cb(struct device_node *np,
 }
 
 /* SW1, SW2, SW3, SW4 linear 0.8V-3.3V with scalar via R1/R2 feeback res */
-static struct regulator_ops ltc3676_linear_regulator_ops = {
+static const struct regulator_ops ltc3676_linear_regulator_ops = {
        .enable = regulator_enable_regmap,
        .disable = regulator_disable_regmap,
        .is_enabled = regulator_is_enabled_regmap,
@@ -173,11 +173,11 @@ static struct regulator_ops ltc3676_linear_regulator_ops = {
 };
 
 /* LDO1 always on fixed 0.8V-3.3V via scalar via R1/R2 feeback res */
-static struct regulator_ops ltc3676_fixed_standby_regulator_ops = {
+static const struct regulator_ops ltc3676_fixed_standby_regulator_ops = {
 };
 
 /* LDO2, LDO3 fixed (LDO2 has external scalar via R1/R2 feedback res) */
-static struct regulator_ops ltc3676_fixed_regulator_ops = {
+static const struct regulator_ops ltc3676_fixed_regulator_ops = {
        .enable = regulator_enable_regmap,
        .disable = regulator_disable_regmap,
        .is_enabled = regulator_is_enabled_regmap,
index c9ff2619971166f0f7b853242fca65e0323c1381..0db288ce319ce4a27562fd5e8afe5cb113c3799a 100644 (file)
@@ -85,14 +85,14 @@ static int max14577_reg_set_current_limit(struct regulator_dev *rdev,
                        reg_data);
 }
 
-static struct regulator_ops max14577_safeout_ops = {
+static const struct regulator_ops max14577_safeout_ops = {
        .is_enabled             = regulator_is_enabled_regmap,
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
        .list_voltage           = regulator_list_voltage_linear,
 };
 
-static struct regulator_ops max14577_charger_ops = {
+static const struct regulator_ops max14577_charger_ops = {
        .is_enabled             = max14577_reg_is_enabled,
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
@@ -130,7 +130,7 @@ static const struct regulator_desc max14577_supported_regulators[] = {
        [MAX14577_CHARGER] = MAX14577_CHARGER_REG,
 };
 
-static struct regulator_ops max77836_ldo_ops = {
+static const struct regulator_ops max77836_ldo_ops = {
        .is_enabled             = regulator_is_enabled_regmap,
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
index d088a7c79e60be4e6799337fd1834c28dea166a3..b94e3a721721b26449be79a49f287411a60d33b0 100644 (file)
@@ -644,7 +644,7 @@ static int max77620_of_parse_cb(struct device_node *np,
        return max77620_init_pmic(pmic, desc->id);
 }
 
-static struct regulator_ops max77620_regulator_ops = {
+static const struct regulator_ops max77620_regulator_ops = {
        .is_enabled = max77620_regulator_is_enabled,
        .enable = max77620_regulator_enable,
        .disable = max77620_regulator_disable,
index ac4fa581e0a5eaa60f1afd4179778883d3919561..c301f37334758599fc98ba615a9159a62c9ba708 100644 (file)
@@ -289,7 +289,7 @@ static int max77686_of_parse_cb(struct device_node *np,
        return 0;
 }
 
-static struct regulator_ops max77686_ops = {
+static const struct regulator_ops max77686_ops = {
        .list_voltage           = regulator_list_voltage_linear,
        .map_voltage            = regulator_map_voltage_linear,
        .is_enabled             = regulator_is_enabled_regmap,
@@ -301,7 +301,7 @@ static struct regulator_ops max77686_ops = {
        .set_suspend_mode       = max77686_set_suspend_mode,
 };
 
-static struct regulator_ops max77686_ldo_ops = {
+static const struct regulator_ops max77686_ldo_ops = {
        .list_voltage           = regulator_list_voltage_linear,
        .map_voltage            = regulator_map_voltage_linear,
        .is_enabled             = regulator_is_enabled_regmap,
@@ -314,7 +314,7 @@ static struct regulator_ops max77686_ldo_ops = {
        .set_suspend_disable    = max77686_set_suspend_disable,
 };
 
-static struct regulator_ops max77686_buck1_ops = {
+static const struct regulator_ops max77686_buck1_ops = {
        .list_voltage           = regulator_list_voltage_linear,
        .map_voltage            = regulator_map_voltage_linear,
        .is_enabled             = regulator_is_enabled_regmap,
@@ -326,7 +326,7 @@ static struct regulator_ops max77686_buck1_ops = {
        .set_suspend_disable    = max77686_set_suspend_disable,
 };
 
-static struct regulator_ops max77686_buck_dvs_ops = {
+static const struct regulator_ops max77686_buck_dvs_ops = {
        .list_voltage           = regulator_list_voltage_linear,
        .map_voltage            = regulator_map_voltage_linear,
        .is_enabled             = regulator_is_enabled_regmap,
index cfbb9512e48623429899cbe0785332b0a1171734..3fce67982682c576748a8f5b5b740dc6cac447d2 100644 (file)
@@ -141,7 +141,7 @@ static const unsigned int max77693_safeout_table[] = {
        3300000,
 };
 
-static struct regulator_ops max77693_safeout_ops = {
+static const struct regulator_ops max77693_safeout_ops = {
        .list_voltage           = regulator_list_voltage_table,
        .is_enabled             = regulator_is_enabled_regmap,
        .enable                 = regulator_enable_regmap,
index 1d3539324d9ae3e0ca7b8d6bff7ba1c0d8d87148..b6261903818c6266a431f3cb2bb8d83b7a5296f1 100644 (file)
@@ -288,7 +288,7 @@ static int max77802_set_ramp_delay_4bit(struct regulator_dev *rdev,
 /*
  * LDOs 2, 4-19, 22-35
  */
-static struct regulator_ops max77802_ldo_ops_logic1 = {
+static const struct regulator_ops max77802_ldo_ops_logic1 = {
        .list_voltage           = regulator_list_voltage_linear,
        .map_voltage            = regulator_map_voltage_linear,
        .is_enabled             = regulator_is_enabled_regmap,
@@ -304,7 +304,7 @@ static struct regulator_ops max77802_ldo_ops_logic1 = {
 /*
  * LDOs 1, 20, 21, 3
  */
-static struct regulator_ops max77802_ldo_ops_logic2 = {
+static const struct regulator_ops max77802_ldo_ops_logic2 = {
        .list_voltage           = regulator_list_voltage_linear,
        .map_voltage            = regulator_map_voltage_linear,
        .is_enabled             = regulator_is_enabled_regmap,
@@ -319,7 +319,7 @@ static struct regulator_ops max77802_ldo_ops_logic2 = {
 };
 
 /* BUCKS 1, 6 */
-static struct regulator_ops max77802_buck_16_dvs_ops = {
+static const struct regulator_ops max77802_buck_16_dvs_ops = {
        .list_voltage           = regulator_list_voltage_linear,
        .map_voltage            = regulator_map_voltage_linear,
        .is_enabled             = regulator_is_enabled_regmap,
@@ -333,7 +333,7 @@ static struct regulator_ops max77802_buck_16_dvs_ops = {
 };
 
 /* BUCKs 2-4 */
-static struct regulator_ops max77802_buck_234_ops = {
+static const struct regulator_ops max77802_buck_234_ops = {
        .list_voltage           = regulator_list_voltage_linear,
        .map_voltage            = regulator_map_voltage_linear,
        .is_enabled             = regulator_is_enabled_regmap,
@@ -348,7 +348,7 @@ static struct regulator_ops max77802_buck_234_ops = {
 };
 
 /* BUCKs 5, 7-10 */
-static struct regulator_ops max77802_buck_dvs_ops = {
+static const struct regulator_ops max77802_buck_dvs_ops = {
        .list_voltage           = regulator_list_voltage_linear,
        .map_voltage            = regulator_map_voltage_linear,
        .is_enabled             = regulator_is_enabled_regmap,
index 5e941db5ccafb2c2bff40ed04bce76c8012787e9..860400d2cd8591522ca3d4ff4366459cbdc2397e 100644 (file)
@@ -109,7 +109,7 @@ struct max8907_regulator {
 #define LDO_650_25(id, supply, base) REG_LDO(id, supply, (base), \
                        650000, 2225000, 25000)
 
-static struct regulator_ops max8907_mbatt_ops = {
+static const struct regulator_ops max8907_mbatt_ops = {
 };
 
 static struct regulator_ops max8907_ldo_ops = {
@@ -121,13 +121,13 @@ static struct regulator_ops max8907_ldo_ops = {
        .is_enabled = regulator_is_enabled_regmap,
 };
 
-static struct regulator_ops max8907_ldo_hwctl_ops = {
+static const struct regulator_ops max8907_ldo_hwctl_ops = {
        .list_voltage = regulator_list_voltage_linear,
        .set_voltage_sel = regulator_set_voltage_sel_regmap,
        .get_voltage_sel = regulator_get_voltage_sel_regmap,
 };
 
-static struct regulator_ops max8907_fixed_ops = {
+static const struct regulator_ops max8907_fixed_ops = {
        .list_voltage = regulator_list_voltage_linear,
 };
 
@@ -138,11 +138,11 @@ static struct regulator_ops max8907_out5v_ops = {
        .is_enabled = regulator_is_enabled_regmap,
 };
 
-static struct regulator_ops max8907_out5v_hwctl_ops = {
+static const struct regulator_ops max8907_out5v_hwctl_ops = {
        .list_voltage = regulator_list_voltage_linear,
 };
 
-static struct regulator_ops max8907_bbat_ops = {
+static const struct regulator_ops max8907_bbat_ops = {
        .list_voltage = regulator_list_voltage_linear,
        .set_voltage_sel = regulator_set_voltage_sel_regmap,
        .get_voltage_sel = regulator_get_voltage_sel_regmap,
index c802f0239dc790bfb5051c28640773d5085b95cd..39b63ddefeb2df7a397e8075164b88d5ecd1171f 100644 (file)
@@ -132,7 +132,7 @@ static int max8925_set_dvm_disable(struct regulator_dev *rdev)
        return max8925_set_bits(info->i2c, info->vol_reg, 1 << SD1_DVM_EN, 0);
 }
 
-static struct regulator_ops max8925_regulator_sdv_ops = {
+static const struct regulator_ops max8925_regulator_sdv_ops = {
        .map_voltage            = regulator_map_voltage_linear,
        .list_voltage           = regulator_list_voltage_linear,
        .set_voltage_sel        = max8925_set_voltage_sel,
@@ -145,7 +145,7 @@ static struct regulator_ops max8925_regulator_sdv_ops = {
        .set_suspend_disable    = max8925_set_dvm_disable,
 };
 
-static struct regulator_ops max8925_regulator_ldo_ops = {
+static const struct regulator_ops max8925_regulator_ldo_ops = {
        .map_voltage            = regulator_map_voltage_linear,
        .list_voltage           = regulator_list_voltage_linear,
        .set_voltage_sel        = max8925_set_voltage_sel,
index 1af8f4a2ab86dedab889510baeba7cc93921ea06..1096546c05e95d5842853c77859380797ac9308a 100644 (file)
@@ -113,7 +113,7 @@ static int max8952_set_voltage_sel(struct regulator_dev *rdev,
        return 0;
 }
 
-static struct regulator_ops max8952_ops = {
+static const struct regulator_ops max8952_ops = {
        .list_voltage           = max8952_list_voltage,
        .get_voltage_sel        = max8952_get_voltage_sel,
        .set_voltage_sel        = max8952_set_voltage_sel,
index f11d41dad9c13b9d581fe6fa122f1898bc140e55..31ae5ee3a80d87b2c296c54efb75e726b3a83b84 100644 (file)
@@ -528,7 +528,7 @@ static int palmas_smps_set_ramp_delay(struct regulator_dev *rdev,
        return ret;
 }
 
-static struct regulator_ops palmas_ops_smps = {
+static const struct regulator_ops palmas_ops_smps = {
        .is_enabled             = regulator_is_enabled_regmap,
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
@@ -542,7 +542,7 @@ static struct regulator_ops palmas_ops_smps = {
        .set_ramp_delay         = palmas_smps_set_ramp_delay,
 };
 
-static struct regulator_ops palmas_ops_ext_control_smps = {
+static const struct regulator_ops palmas_ops_ext_control_smps = {
        .set_mode               = palmas_set_mode_smps,
        .get_mode               = palmas_get_mode_smps,
        .get_voltage_sel        = regulator_get_voltage_sel_regmap,
@@ -553,7 +553,7 @@ static struct regulator_ops palmas_ops_ext_control_smps = {
        .set_ramp_delay         = palmas_smps_set_ramp_delay,
 };
 
-static struct regulator_ops palmas_ops_smps10 = {
+static const struct regulator_ops palmas_ops_smps10 = {
        .is_enabled             = regulator_is_enabled_regmap,
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
@@ -565,7 +565,7 @@ static struct regulator_ops palmas_ops_smps10 = {
        .get_bypass             = regulator_get_bypass_regmap,
 };
 
-static struct regulator_ops tps65917_ops_smps = {
+static const struct regulator_ops tps65917_ops_smps = {
        .is_enabled             = regulator_is_enabled_regmap,
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
@@ -578,7 +578,7 @@ static struct regulator_ops tps65917_ops_smps = {
        .set_voltage_time_sel   = regulator_set_voltage_time_sel,
 };
 
-static struct regulator_ops tps65917_ops_ext_control_smps = {
+static const struct regulator_ops tps65917_ops_ext_control_smps = {
        .set_mode               = palmas_set_mode_smps,
        .get_mode               = palmas_get_mode_smps,
        .get_voltage_sel        = regulator_get_voltage_sel_regmap,
@@ -602,7 +602,7 @@ static int palmas_is_enabled_ldo(struct regulator_dev *dev)
        return !!(reg);
 }
 
-static struct regulator_ops palmas_ops_ldo = {
+static const struct regulator_ops palmas_ops_ldo = {
        .is_enabled             = palmas_is_enabled_ldo,
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
@@ -612,7 +612,7 @@ static struct regulator_ops palmas_ops_ldo = {
        .map_voltage            = regulator_map_voltage_linear,
 };
 
-static struct regulator_ops palmas_ops_ldo9 = {
+static const struct regulator_ops palmas_ops_ldo9 = {
        .is_enabled             = palmas_is_enabled_ldo,
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
@@ -624,23 +624,23 @@ static struct regulator_ops palmas_ops_ldo9 = {
        .get_bypass             = regulator_get_bypass_regmap,
 };
 
-static struct regulator_ops palmas_ops_ext_control_ldo = {
+static const struct regulator_ops palmas_ops_ext_control_ldo = {
        .get_voltage_sel        = regulator_get_voltage_sel_regmap,
        .set_voltage_sel        = regulator_set_voltage_sel_regmap,
        .list_voltage           = regulator_list_voltage_linear,
        .map_voltage            = regulator_map_voltage_linear,
 };
 
-static struct regulator_ops palmas_ops_extreg = {
+static const struct regulator_ops palmas_ops_extreg = {
        .is_enabled             = regulator_is_enabled_regmap,
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
 };
 
-static struct regulator_ops palmas_ops_ext_control_extreg = {
+static const struct regulator_ops palmas_ops_ext_control_extreg = {
 };
 
-static struct regulator_ops tps65917_ops_ldo = {
+static const struct regulator_ops tps65917_ops_ldo = {
        .is_enabled             = palmas_is_enabled_ldo,
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
@@ -651,7 +651,7 @@ static struct regulator_ops tps65917_ops_ldo = {
        .set_voltage_time_sel   = regulator_set_voltage_time_sel,
 };
 
-static struct regulator_ops tps65917_ops_ldo_1_2 = {
+static const struct regulator_ops tps65917_ops_ldo_1_2 = {
        .is_enabled             = palmas_is_enabled_ldo,
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
index f9d74d63be7c7e6bb88d7cfeb8d4d4046d919002..0cb76ba29e84303b9eaf1e32a2c7b81445a32935 100644 (file)
@@ -54,7 +54,7 @@ static const unsigned int pbias_volt_table[] = {
        3000000
 };
 
-static struct regulator_ops pbias_regulator_voltage_ops = {
+static const struct regulator_ops pbias_regulator_voltage_ops = {
        .list_voltage = regulator_list_voltage_table,
        .get_voltage_sel = regulator_get_voltage_sel_regmap,
        .set_voltage_sel = regulator_set_voltage_sel_regmap,
index 9b16e6158f15045e1b725c9e1781f0f98f22bd59..79cb971a69bb1bd616d6c2ddd81f6a1c8dd3dfcc 100644 (file)
@@ -210,7 +210,7 @@ static int pcap_regulator_is_enabled(struct regulator_dev *rdev)
        return (tmp >> vreg->en) & 1;
 }
 
-static struct regulator_ops pcap_regulator_ops = {
+static const struct regulator_ops pcap_regulator_ops = {
        .list_voltage   = regulator_list_voltage_table,
        .set_voltage_sel = pcap_regulator_set_voltage_sel,
        .get_voltage_sel = pcap_regulator_get_voltage_sel,
index 134f90ec9ca1fc80c1728d0ae4d6778edc93e2f6..762e18447cae06601c8658b974e23c62851996e7 100644 (file)
@@ -41,7 +41,7 @@
                .enable_mask = PCF50633_REGULATOR_ON,           \
        }
 
-static struct regulator_ops pcf50633_regulator_ops = {
+static const struct regulator_ops pcf50633_regulator_ops = {
        .set_voltage_sel = regulator_set_voltage_sel_regmap,
        .get_voltage_sel = regulator_get_voltage_sel_regmap,
        .list_voltage = regulator_list_voltage_linear,
index cb18b5c4f2db967e05713d7a29dd0433ea4f778a..e193bbbb8ffc1beebabd453a06dc8670d159fe0d 100644 (file)
@@ -126,7 +126,7 @@ static int pfuze100_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
        return ret;
 }
 
-static struct regulator_ops pfuze100_ldo_regulator_ops = {
+static const struct regulator_ops pfuze100_ldo_regulator_ops = {
        .enable = regulator_enable_regmap,
        .disable = regulator_disable_regmap,
        .is_enabled = regulator_is_enabled_regmap,
@@ -135,14 +135,14 @@ static struct regulator_ops pfuze100_ldo_regulator_ops = {
        .get_voltage_sel = regulator_get_voltage_sel_regmap,
 };
 
-static struct regulator_ops pfuze100_fixed_regulator_ops = {
+static const struct regulator_ops pfuze100_fixed_regulator_ops = {
        .enable = regulator_enable_regmap,
        .disable = regulator_disable_regmap,
        .is_enabled = regulator_is_enabled_regmap,
        .list_voltage = regulator_list_voltage_linear,
 };
 
-static struct regulator_ops pfuze100_sw_regulator_ops = {
+static const struct regulator_ops pfuze100_sw_regulator_ops = {
        .list_voltage = regulator_list_voltage_linear,
        .set_voltage_sel = regulator_set_voltage_sel_regmap,
        .get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -150,7 +150,7 @@ static struct regulator_ops pfuze100_sw_regulator_ops = {
        .set_ramp_delay = pfuze100_set_ramp_delay,
 };
 
-static struct regulator_ops pfuze100_swb_regulator_ops = {
+static const struct regulator_ops pfuze100_swb_regulator_ops = {
        .enable = regulator_enable_regmap,
        .disable = regulator_disable_regmap,
        .list_voltage = regulator_list_voltage_table,
index 6c4afc73ecac3d7cb47d1a87da60b95f14ba9370..a9446056435f9b71772d4cecb33017c42abeebcf 100644 (file)
@@ -162,7 +162,7 @@ static int pv88060_get_current_limit(struct regulator_dev *rdev)
        return info->current_limits[data];
 }
 
-static struct regulator_ops pv88060_buck_ops = {
+static const struct regulator_ops pv88060_buck_ops = {
        .get_mode = pv88060_buck_get_mode,
        .set_mode = pv88060_buck_set_mode,
        .enable = regulator_enable_regmap,
@@ -175,7 +175,7 @@ static struct regulator_ops pv88060_buck_ops = {
        .get_current_limit = pv88060_get_current_limit,
 };
 
-static struct regulator_ops pv88060_ldo_ops = {
+static const struct regulator_ops pv88060_ldo_ops = {
        .enable = regulator_enable_regmap,
        .disable = regulator_disable_regmap,
        .is_enabled = regulator_is_enabled_regmap,
index 954a20eeb26f89362be0a4274d364b22a8db3783..9a08cb2de501e95175fe4f56168ec0bfe123b08a 100644 (file)
@@ -306,7 +306,7 @@ static int pv88080_get_current_limit(struct regulator_dev *rdev)
        return info->current_limits[data];
 }
 
-static struct regulator_ops pv88080_buck_ops = {
+static const struct regulator_ops pv88080_buck_ops = {
        .get_mode = pv88080_buck_get_mode,
        .set_mode = pv88080_buck_set_mode,
        .enable = regulator_enable_regmap,
@@ -319,7 +319,7 @@ static struct regulator_ops pv88080_buck_ops = {
        .get_current_limit = pv88080_get_current_limit,
 };
 
-static struct regulator_ops pv88080_hvbuck_ops = {
+static const struct regulator_ops pv88080_hvbuck_ops = {
        .enable = regulator_enable_regmap,
        .disable = regulator_disable_regmap,
        .is_enabled = regulator_is_enabled_regmap,
index 42164117535298bc011378bad23f221e8063c566..ab51e254d13a0dacd78a6f4b910fd34b7164d353 100644 (file)
@@ -184,7 +184,7 @@ static int pv88090_get_current_limit(struct regulator_dev *rdev)
        return info->current_limits[data];
 }
 
-static struct regulator_ops pv88090_buck_ops = {
+static const struct regulator_ops pv88090_buck_ops = {
        .get_mode = pv88090_buck_get_mode,
        .set_mode = pv88090_buck_set_mode,
        .enable = regulator_enable_regmap,
@@ -197,7 +197,7 @@ static struct regulator_ops pv88090_buck_ops = {
        .get_current_limit = pv88090_get_current_limit,
 };
 
-static struct regulator_ops pv88090_ldo_ops = {
+static const struct regulator_ops pv88090_ldo_ops = {
        .enable = regulator_enable_regmap,
        .disable = regulator_disable_regmap,
        .is_enabled = regulator_is_enabled_regmap,
index 8ed46a9a55c8469e38c0b14f7c5c8c95fb4fe569..f35994a2a5be043a88aae22b5a2875cd96de7180 100644 (file)
@@ -305,6 +305,56 @@ static const struct regulator_desc pm8916_buck_hvo_smps = {
        .ops = &rpm_smps_ldo_ops,
 };
 
+static const struct regulator_desc pm8994_hfsmps = {
+       .linear_ranges = (struct regulator_linear_range[]) {
+               REGULATOR_LINEAR_RANGE( 375000,  0,  95, 12500),
+               REGULATOR_LINEAR_RANGE(1550000, 96, 158, 25000),
+       },
+       .n_linear_ranges = 2,
+       .n_voltages = 159,
+       .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8994_ftsmps = {
+       .linear_ranges = (struct regulator_linear_range[]) {
+               REGULATOR_LINEAR_RANGE(350000,  0, 199, 5000),
+               REGULATOR_LINEAR_RANGE(700000, 200, 349, 10000),
+       },
+       .n_linear_ranges = 2,
+       .n_voltages = 350,
+       .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8994_nldo = {
+       .linear_ranges = (struct regulator_linear_range[]) {
+               REGULATOR_LINEAR_RANGE(750000, 0, 63, 12500),
+       },
+       .n_linear_ranges = 1,
+       .n_voltages = 64,
+       .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8994_pldo = {
+       .linear_ranges = (struct regulator_linear_range[]) {
+               REGULATOR_LINEAR_RANGE( 750000,  0,  63, 12500),
+               REGULATOR_LINEAR_RANGE(1550000, 64, 126, 25000),
+               REGULATOR_LINEAR_RANGE(3100000, 127, 163, 50000),
+       },
+       .n_linear_ranges = 3,
+       .n_voltages = 164,
+       .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8994_switch = {
+       .ops = &rpm_switch_ops,
+};
+
+static const struct regulator_desc pm8994_lnldo = {
+       .fixed_uV = 1740000,
+       .n_voltages = 1,
+       .ops = &rpm_smps_ldo_ops_fixed,
+};
+
 struct rpm_regulator_data {
        const char *name;
        u32 type;
@@ -443,10 +493,62 @@ static const struct rpm_regulator_data rpm_pma8084_regulators[] = {
        {}
 };
 
+static const struct rpm_regulator_data rpm_pm8994_regulators[] = {
+       { "s1", QCOM_SMD_RPM_SMPA, 1, &pm8994_ftsmps, "vdd_s1" },
+       { "s2", QCOM_SMD_RPM_SMPA, 2, &pm8994_ftsmps, "vdd_s2" },
+       { "s3", QCOM_SMD_RPM_SMPA, 3, &pm8994_hfsmps, "vdd_s3" },
+       { "s4", QCOM_SMD_RPM_SMPA, 4, &pm8994_hfsmps, "vdd_s4" },
+       { "s5", QCOM_SMD_RPM_SMPA, 5, &pm8994_hfsmps, "vdd_s5" },
+       { "s6", QCOM_SMD_RPM_SMPA, 6, &pm8994_ftsmps, "vdd_s6" },
+       { "s7", QCOM_SMD_RPM_SMPA, 7, &pm8994_hfsmps, "vdd_s7" },
+       { "s8", QCOM_SMD_RPM_SMPA, 8, &pm8994_ftsmps, "vdd_s8" },
+       { "s9", QCOM_SMD_RPM_SMPA, 9, &pm8994_ftsmps, "vdd_s9" },
+       { "s10", QCOM_SMD_RPM_SMPA, 10, &pm8994_ftsmps, "vdd_s10" },
+       { "s11", QCOM_SMD_RPM_SMPA, 11, &pm8994_ftsmps, "vdd_s11" },
+       { "s12", QCOM_SMD_RPM_SMPA, 12, &pm8994_ftsmps, "vdd_s12" },
+       { "l1", QCOM_SMD_RPM_LDOA, 1, &pm8994_nldo, "vdd_l1" },
+       { "l2", QCOM_SMD_RPM_LDOA, 2, &pm8994_nldo, "vdd_l2_l26_l28" },
+       { "l3", QCOM_SMD_RPM_LDOA, 3, &pm8994_nldo, "vdd_l3_l11" },
+       { "l4", QCOM_SMD_RPM_LDOA, 4, &pm8994_nldo, "vdd_l4_l27_l31" },
+       { "l5", QCOM_SMD_RPM_LDOA, 5, &pm8994_lnldo, "vdd_l5_l7" },
+       { "l6", QCOM_SMD_RPM_LDOA, 6, &pm8994_pldo, "vdd_l6_l12_l32" },
+       { "l7", QCOM_SMD_RPM_LDOA, 7, &pm8994_lnldo, "vdd_l5_l7" },
+       { "l8", QCOM_SMD_RPM_LDOA, 8, &pm8994_pldo, "vdd_l8_l16_l30" },
+       { "l9", QCOM_SMD_RPM_LDOA, 9, &pm8994_pldo, "vdd_l9_l10_l18_l22" },
+       { "l10", QCOM_SMD_RPM_LDOA, 10, &pm8994_pldo, "vdd_l9_l10_l18_l22" },
+       { "l11", QCOM_SMD_RPM_LDOA, 11, &pm8994_nldo, "vdd_l3_l11" },
+       { "l12", QCOM_SMD_RPM_LDOA, 12, &pm8994_pldo, "vdd_l6_l12_l32" },
+       { "l13", QCOM_SMD_RPM_LDOA, 13, &pm8994_pldo, "vdd_l13_l19_l23_l24" },
+       { "l14", QCOM_SMD_RPM_LDOA, 14, &pm8994_pldo, "vdd_l14_l15" },
+       { "l15", QCOM_SMD_RPM_LDOA, 15, &pm8994_pldo, "vdd_l14_l15" },
+       { "l16", QCOM_SMD_RPM_LDOA, 16, &pm8994_pldo, "vdd_l8_l16_l30" },
+       { "l17", QCOM_SMD_RPM_LDOA, 17, &pm8994_pldo, "vdd_l17_l29" },
+       { "l18", QCOM_SMD_RPM_LDOA, 18, &pm8994_pldo, "vdd_l9_l10_l18_l22" },
+       { "l19", QCOM_SMD_RPM_LDOA, 19, &pm8994_pldo, "vdd_l13_l19_l23_l24" },
+       { "l20", QCOM_SMD_RPM_LDOA, 20, &pm8994_pldo, "vdd_l20_l21" },
+       { "l21", QCOM_SMD_RPM_LDOA, 21, &pm8994_pldo, "vdd_l20_l21" },
+       { "l22", QCOM_SMD_RPM_LDOA, 22, &pm8994_pldo, "vdd_l9_l10_l18_l22" },
+       { "l23", QCOM_SMD_RPM_LDOA, 23, &pm8994_pldo, "vdd_l13_l19_l23_l24" },
+       { "l24", QCOM_SMD_RPM_LDOA, 24, &pm8994_pldo, "vdd_l13_l19_l23_l24" },
+       { "l25", QCOM_SMD_RPM_LDOA, 25, &pm8994_pldo, "vdd_l25" },
+       { "l26", QCOM_SMD_RPM_LDOA, 26, &pm8994_nldo, "vdd_l2_l26_l28" },
+       { "l27", QCOM_SMD_RPM_LDOA, 27, &pm8994_nldo, "vdd_l4_l27_l31" },
+       { "l28", QCOM_SMD_RPM_LDOA, 28, &pm8994_nldo, "vdd_l2_l26_l28" },
+       { "l29", QCOM_SMD_RPM_LDOA, 29, &pm8994_pldo, "vdd_l17_l29" },
+       { "l30", QCOM_SMD_RPM_LDOA, 30, &pm8994_pldo, "vdd_l8_l16_l30" },
+       { "l31", QCOM_SMD_RPM_LDOA, 31, &pm8994_nldo, "vdd_l4_l27_l31" },
+       { "l32", QCOM_SMD_RPM_LDOA, 32, &pm8994_pldo, "vdd_l6_l12_l32" },
+       { "lvs1", QCOM_SMD_RPM_VSA, 1, &pm8994_switch, "vdd_lvs1_2" },
+       { "lvs2", QCOM_SMD_RPM_VSA, 2, &pm8994_switch, "vdd_lvs1_2" },
+
+       {}
+};
+
 static const struct of_device_id rpm_of_match[] = {
        { .compatible = "qcom,rpm-pm8841-regulators", .data = &rpm_pm8841_regulators },
        { .compatible = "qcom,rpm-pm8916-regulators", .data = &rpm_pm8916_regulators },
        { .compatible = "qcom,rpm-pm8941-regulators", .data = &rpm_pm8941_regulators },
+       { .compatible = "qcom,rpm-pm8994-regulators", .data = &rpm_pm8994_regulators },
        { .compatible = "qcom,rpm-pma8084-regulators", .data = &rpm_pma8084_regulators },
        {}
 };
index d2e67c5121955915a2f9d561e40a62361bbe552e..d0f1340168b18080875e08e8b0b1f578cc5c3091 100644 (file)
@@ -61,7 +61,7 @@ static int rc5t583_regulator_enable_time(struct regulator_dev *rdev)
        return DIV_ROUND_UP(curr_uV, reg->reg_info->enable_uv_per_us);
 }
 
-static struct regulator_ops rc5t583_ops = {
+static const struct regulator_ops rc5t583_ops = {
        .is_enabled             = regulator_is_enabled_regmap,
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
index 9c930eb68cda69bf6ad87cec1d302b2c7f83e9ba..8d2819e36654522ff5605317452904a54dffa2d4 100644 (file)
@@ -19,7 +19,7 @@
 #include <linux/regulator/driver.h>
 #include <linux/regulator/of_regulator.h>
 
-static struct regulator_ops rn5t618_reg_ops = {
+static const struct regulator_ops rn5t618_reg_ops = {
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
        .is_enabled             = regulator_is_enabled_regmap,
index 92f88753bfed274fa5acbec2d2e37dda22c43380..38ee97a085f915921d8d67b4ae839f202422eaa8 100644 (file)
@@ -26,6 +26,7 @@
 #define S2MPA01_REGULATOR_CNT ARRAY_SIZE(regulators)
 
 struct s2mpa01_info {
+       struct of_regulator_match rdata[S2MPA01_REGULATOR_MAX];
        int ramp_delay24;
        int ramp_delay3;
        int ramp_delay5;
@@ -341,9 +342,9 @@ static int s2mpa01_pmic_probe(struct platform_device *pdev)
 {
        struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
        struct sec_platform_data *pdata = dev_get_platdata(iodev->dev);
-       struct of_regulator_match rdata[S2MPA01_REGULATOR_MAX] = { };
        struct device_node *reg_np = NULL;
        struct regulator_config config = { };
+       struct of_regulator_match *rdata;
        struct s2mpa01_info *s2mpa01;
        int i;
 
@@ -351,6 +352,7 @@ static int s2mpa01_pmic_probe(struct platform_device *pdev)
        if (!s2mpa01)
                return -ENOMEM;
 
+       rdata = s2mpa01->rdata;
        for (i = 0; i < S2MPA01_REGULATOR_CNT; i++)
                rdata[i].name = regulators[i].name;
 
index ecb0371780af6d8927a95c5ec61da1849172be40..45e96e15469005f376478d49dbc09045aae4bc3c 100644 (file)
@@ -157,19 +157,19 @@ static struct tps65086_regulator regulators[] = {
                           VDOA23_VID_MASK, TPS65086_LDOA3CTRL, BIT(0),
                           tps65086_ldoa23_ranges, 0, 0),
        TPS65086_SWITCH("SWA1", "swa1", SWA1, TPS65086_SWVTT_EN, BIT(5)),
-       TPS65086_SWITCH("SWB1", "swa2", SWB1, TPS65086_SWVTT_EN, BIT(6)),
-       TPS65086_SWITCH("SWB2", "swa3", SWB2, TPS65086_SWVTT_EN, BIT(7)),
+       TPS65086_SWITCH("SWB1", "swb1", SWB1, TPS65086_SWVTT_EN, BIT(6)),
+       TPS65086_SWITCH("SWB2", "swb2", SWB2, TPS65086_SWVTT_EN, BIT(7)),
        TPS65086_SWITCH("VTT", "vtt", VTT, TPS65086_SWVTT_EN, BIT(4)),
 };
 
-static int tps65086_of_parse_cb(struct device_node *dev,
+static int tps65086_of_parse_cb(struct device_node *node,
                                const struct regulator_desc *desc,
                                struct regulator_config *config)
 {
        int ret;
 
        /* Check for 25mV step mode */
-       if (of_property_read_bool(config->of_node, "ti,regulator-step-size-25mv")) {
+       if (of_property_read_bool(node, "ti,regulator-step-size-25mv")) {
                switch (desc->id) {
                case BUCK1:
                case BUCK2:
@@ -193,7 +193,7 @@ static int tps65086_of_parse_cb(struct device_node *dev,
        }
 
        /* Check for decay mode */
-       if (desc->id <= BUCK6 && of_property_read_bool(config->of_node, "ti,regulator-decay")) {
+       if (desc->id <= BUCK6 && of_property_read_bool(node, "ti,regulator-decay")) {
                ret = regmap_write_bits(config->regmap,
                                        regulators[desc->id].decay_reg,
                                        regulators[desc->id].decay_mask,
index 2d12b9af35402c6f3af37a0b9e24c1c3227fb66f..5324dc9e6d6e67d763f4f65c9532fe39f2fd0a0c 100644 (file)
@@ -179,7 +179,8 @@ static const struct regulator_desc regulators[] = {
        TPS65217_REGULATOR("DCDC1", TPS65217_DCDC_1, "dcdc1",
                           tps65217_pmic_ops, 64, TPS65217_REG_DEFDCDC1,
                           TPS65217_DEFDCDCX_DCDC_MASK, TPS65217_ENABLE_DC1_EN,
-                          NULL, tps65217_uv1_ranges, 2, TPS65217_REG_SEQ1,
+                          NULL, tps65217_uv1_ranges,
+                          ARRAY_SIZE(tps65217_uv1_ranges), TPS65217_REG_SEQ1,
                           TPS65217_SEQ1_DC1_SEQ_MASK),
        TPS65217_REGULATOR("DCDC2", TPS65217_DCDC_2, "dcdc2",
                           tps65217_pmic_ops, 64, TPS65217_REG_DEFDCDC2,
@@ -190,7 +191,8 @@ static const struct regulator_desc regulators[] = {
        TPS65217_REGULATOR("DCDC3", TPS65217_DCDC_3, "dcdc3",
                           tps65217_pmic_ops, 64, TPS65217_REG_DEFDCDC3,
                           TPS65217_DEFDCDCX_DCDC_MASK, TPS65217_ENABLE_DC3_EN,
-                          NULL, tps65217_uv1_ranges, 1, TPS65217_REG_SEQ2,
+                          NULL, tps65217_uv1_ranges,
+                          ARRAY_SIZE(tps65217_uv1_ranges), TPS65217_REG_SEQ2,
                           TPS65217_SEQ2_DC3_SEQ_MASK),
        TPS65217_REGULATOR("LDO1", TPS65217_LDO_1, "ldo1",
                           tps65217_pmic_ldo1_ops, 16, TPS65217_REG_DEFLDO1,
index 4864b9d742c0f7915cc792aaacd692c2a7f305b0..716191046a70782b0007033dda6e1402c0d68ea3 100644 (file)
@@ -452,7 +452,7 @@ static int twl6030smps_map_voltage(struct regulator_dev *rdev, int min_uV,
                        vsel = 62;
                else if ((min_uV > 1800000) && (min_uV <= 1900000))
                        vsel = 61;
-               else if ((min_uV > 1350000) && (min_uV <= 1800000))
+               else if ((min_uV > 1500000) && (min_uV <= 1800000))
                        vsel = 60;
                else if ((min_uV > 1350000) && (min_uV <= 1500000))
                        vsel = 59;
index 10368ed8fd136230d1d9a13492bf557bd063a669..b6f5f1e1826c15c0e7f6db0a9af599dba39d5878 100644 (file)
@@ -163,7 +163,7 @@ int reset_control_reset(struct reset_control *rstc)
        }
 
        ret = rstc->rcdev->ops->reset(rstc->rcdev, rstc->id);
-       if (rstc->shared && !ret)
+       if (rstc->shared && ret)
                atomic_dec(&rstc->triggered_count);
 
        return ret;
index c93c5a8fba32925584dbc28c60610786328d09ff..5dc673dc948785a79da8d070954323b9357385c1 100644 (file)
@@ -1551,12 +1551,15 @@ config RTC_DRV_MPC5121
          will be called rtc-mpc5121.
 
 config RTC_DRV_JZ4740
-       bool "Ingenic JZ4740 SoC"
+       tristate "Ingenic JZ4740 SoC"
        depends on MACH_INGENIC || COMPILE_TEST
        help
          If you say yes here you get support for the Ingenic JZ47xx SoCs RTC
          controllers.
 
+         This driver can also be built as a module. If so, the module
+         will be called rtc-jz4740.
+
 config RTC_DRV_LPC24XX
        tristate "NXP RTC for LPC178x/18xx/408x/43xx"
        depends on ARCH_LPC18XX || COMPILE_TEST
index 72918c1ba0928d4fc78d921db621c489fafb9701..64989afffa3daada4b062321c527f18bca142bbb 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/reboot.h>
@@ -294,7 +295,7 @@ static void jz4740_rtc_power_off(void)
                             JZ_REG_RTC_RESET_COUNTER, reset_counter_ticks);
 
        jz4740_rtc_poweroff(dev_for_power_off);
-       machine_halt();
+       kernel_halt();
 }
 
 static const struct of_device_id jz4740_rtc_of_match[] = {
@@ -302,6 +303,7 @@ static const struct of_device_id jz4740_rtc_of_match[] = {
        { .compatible = "ingenic,jz4780-rtc", .data = (void *)ID_JZ4780 },
        {},
 };
+MODULE_DEVICE_TABLE(of, jz4740_rtc_of_match);
 
 static int jz4740_rtc_probe(struct platform_device *pdev)
 {
@@ -429,6 +431,7 @@ static const struct platform_device_id jz4740_rtc_ids[] = {
        { "jz4780-rtc", ID_JZ4780 },
        {}
 };
+MODULE_DEVICE_TABLE(platform, jz4740_rtc_ids);
 
 static struct platform_driver jz4740_rtc_driver = {
        .probe   = jz4740_rtc_probe,
@@ -440,4 +443,9 @@ static struct platform_driver jz4740_rtc_driver = {
        .id_table = jz4740_rtc_ids,
 };
 
-builtin_platform_driver(jz4740_rtc_driver);
+module_platform_driver(jz4740_rtc_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RTC driver for the JZ4740 SoC\n");
+MODULE_ALIAS("platform:jz4740-rtc");
index 75f820ca17b79b0574e3afd91df18998a3438c30..27ff38f839fc1c752385f430f55020fb934ed6de 100644 (file)
@@ -1583,7 +1583,7 @@ out:
 int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
 {
        struct zfcp_qdio *qdio = wka_port->adapter->qdio;
-       struct zfcp_fsf_req *req = NULL;
+       struct zfcp_fsf_req *req;
        int retval = -EIO;
 
        spin_lock_irq(&qdio->req_q_lock);
@@ -1612,7 +1612,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
                zfcp_fsf_req_free(req);
 out:
        spin_unlock_irq(&qdio->req_q_lock);
-       if (req && !IS_ERR(req))
+       if (!retval)
                zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
        return retval;
 }
@@ -1638,7 +1638,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
 int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
 {
        struct zfcp_qdio *qdio = wka_port->adapter->qdio;
-       struct zfcp_fsf_req *req = NULL;
+       struct zfcp_fsf_req *req;
        int retval = -EIO;
 
        spin_lock_irq(&qdio->req_q_lock);
@@ -1667,7 +1667,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
                zfcp_fsf_req_free(req);
 out:
        spin_unlock_irq(&qdio->req_q_lock);
-       if (req && !IS_ERR(req))
+       if (!retval)
                zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
        return retval;
 }
index 4f56b1003cc7d9d8c56dcb98ead00b0fb5348788..5b48bedd7c385f3b73adb97d45f691d540914f42 100644 (file)
@@ -50,9 +50,13 @@ struct aac_common aac_config = {
 
 static inline int aac_is_msix_mode(struct aac_dev *dev)
 {
-       u32 status;
+       u32 status = 0;
 
-       status = src_readl(dev, MUnit.OMR);
+       if (dev->pdev->device == PMC_DEVICE_S6 ||
+               dev->pdev->device == PMC_DEVICE_S7 ||
+               dev->pdev->device == PMC_DEVICE_S8) {
+               status = src_readl(dev, MUnit.OMR);
+       }
        return (status & AAC_INT_MODE_MSIX);
 }
 
index f501095f91ace5c53f49018e5e2c755c0904fd61..898461b146cc20bc146471748ba71da8de271e0f 100644 (file)
@@ -74,7 +74,7 @@ static void bnx2fc_cmd_timeout(struct work_struct *work)
                                    &io_req->req_flags)) {
                        /* Handle internally generated ABTS timeout */
                        BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n",
-                                       io_req->refcount.refcount.counter);
+                                       kref_read(&io_req->refcount));
                        if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
                                               &io_req->req_flags))) {
                                /*
@@ -1141,7 +1141,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
                return SUCCESS;
        }
        BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n",
-                     io_req->refcount.refcount.counter);
+                     kref_read(&io_req->refcount));
 
        /* Hold IO request across abort processing */
        kref_get(&io_req->refcount);
@@ -1299,7 +1299,7 @@ void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
 {
        BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
                              "refcnt = %d, cmd_type = %d\n",
-                  io_req->refcount.refcount.counter, io_req->cmd_type);
+                  kref_read(&io_req->refcount), io_req->cmd_type);
        bnx2fc_scsi_done(io_req, DID_ERROR);
        kref_put(&io_req->refcount, bnx2fc_cmd_release);
        if (io_req->wait_for_comp)
@@ -1318,7 +1318,7 @@ void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
        BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x"
                              "refcnt = %d, cmd_type = %d\n",
                   io_req->xid,
-                  io_req->refcount.refcount.counter, io_req->cmd_type);
+                  kref_read(&io_req->refcount), io_req->cmd_type);
 
        if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
                                       &io_req->req_flags)) {
index 95ba99044c3e9c2af6f0a8deb656b878c15cf656..18e0ea83d36172cf2fcc55ecfe8f3b133913f1d5 100644 (file)
@@ -301,7 +301,7 @@ static inline void __cxgbi_sock_put(const char *fn, struct cxgbi_sock *csk)
 {
        log_debug(1 << CXGBI_DBG_SOCK,
                "%s, put csk 0x%p, ref %u-1.\n",
-               fn, csk, atomic_read(&csk->refcnt.refcount));
+               fn, csk, kref_read(&csk->refcnt));
        kref_put(&csk->refcnt, cxgbi_sock_free);
 }
 #define cxgbi_sock_put(csk)    __cxgbi_sock_put(__func__, csk)
@@ -310,7 +310,7 @@ static inline void __cxgbi_sock_get(const char *fn, struct cxgbi_sock *csk)
 {
        log_debug(1 << CXGBI_DBG_SOCK,
                "%s, get csk 0x%p, ref %u+1.\n",
-               fn, csk, atomic_read(&csk->refcnt.refcount));
+               fn, csk, kref_read(&csk->refcnt));
        kref_get(&csk->refcnt);
 }
 #define cxgbi_sock_get(csk)    __cxgbi_sock_get(__func__, csk)
index 99b747cedbebc517a78714db321743f0837834b6..0f807798c6245f8ba64b5ec3cced694d6a821c31 100644 (file)
@@ -3816,6 +3816,7 @@ static struct configfs_attribute *ibmvscsis_tpg_attrs[] = {
 static const struct target_core_fabric_ops ibmvscsis_ops = {
        .module                         = THIS_MODULE,
        .name                           = "ibmvscsis",
+       .max_data_sg_nents              = MAX_TXU / PAGE_SIZE,
        .get_fabric_name                = ibmvscsis_get_fabric_name,
        .tpg_get_wwn                    = ibmvscsis_get_fabric_wwn,
        .tpg_get_tag                    = ibmvscsis_get_tag,
index a63542bac15333c24d5d4716900f6413447af696..caa7a7b0ec53b3d1319aa158c30bddf1ecdc0f19 100644 (file)
@@ -607,7 +607,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
                len += snprintf(buf+len, size-len, "usgmap:%x ",
                        ndlp->nlp_usg_map);
                len += snprintf(buf+len, size-len, "refcnt:%x",
-                       atomic_read(&ndlp->kref.refcount));
+                       kref_read(&ndlp->kref));
                len +=  snprintf(buf+len, size-len, "\n");
        }
        spin_unlock_irq(shost->host_lock);
index 7b6bd8ed0d0bd6fc8b056052dffbe60595015cbe..63bef456654899a44e8fdc43745eb0abc621ebb8 100644 (file)
@@ -3690,7 +3690,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
                                 "0006 rpi%x DID:%x flg:%x %d map:%x %p\n",
                                 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
-                                atomic_read(&ndlp->kref.refcount),
+                                kref_read(&ndlp->kref),
                                 ndlp->nlp_usg_map, ndlp);
                if (NLP_CHK_NODE_ACT(ndlp)) {
                        lpfc_nlp_put(ndlp);
index ed223937798a9d9653dda07daeb9f10c916a1b79..82047070cdc973940e042afa4ff28c7685f090f3 100644 (file)
@@ -3440,7 +3440,7 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
                         "0002 rpi:%x DID:%x flg:%x %d map:%x %p\n",
                         ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
-                        atomic_read(&ndlp->kref.refcount),
+                        kref_read(&ndlp->kref),
                         ndlp->nlp_usg_map, ndlp);
        if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
                ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
@@ -3861,7 +3861,7 @@ out:
        lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
                         "0003 rpi:%x DID:%x flg:%x %d map%x %p\n",
                         ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
-                        atomic_read(&ndlp->kref.refcount),
+                        kref_read(&ndlp->kref),
                         ndlp->nlp_usg_map, ndlp);
 
        if (vport->port_state < LPFC_VPORT_READY) {
@@ -4238,7 +4238,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                                "0277 lpfc_enable_node: ndlp:x%p "
                                "usgmap:x%x refcnt:%d\n",
                                (void *)ndlp, ndlp->nlp_usg_map,
-                               atomic_read(&ndlp->kref.refcount));
+                               kref_read(&ndlp->kref));
                return NULL;
        }
        /* The ndlp should not already be in active mode */
@@ -4248,7 +4248,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                                "0278 lpfc_enable_node: ndlp:x%p "
                                "usgmap:x%x refcnt:%d\n",
                                (void *)ndlp, ndlp->nlp_usg_map,
-                               atomic_read(&ndlp->kref.refcount));
+                               kref_read(&ndlp->kref));
                return NULL;
        }
 
@@ -4272,7 +4272,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                                 "0008 rpi:%x DID:%x flg:%x refcnt:%d "
                                 "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
                                 ndlp->nlp_flag,
-                                atomic_read(&ndlp->kref.refcount),
+                                kref_read(&ndlp->kref),
                                 ndlp->nlp_usg_map, ndlp);
        }
 
@@ -4546,7 +4546,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
                                    (bf_get(lpfc_sli_intf_if_type,
                                     &phba->sli4_hba.sli_intf) ==
                                      LPFC_SLI_INTF_IF_TYPE_2) &&
-                                   (atomic_read(&ndlp->kref.refcount) > 0)) {
+                                   (kref_read(&ndlp->kref) > 0)) {
                                        mbox->context1 = lpfc_nlp_get(ndlp);
                                        mbox->mbox_cmpl =
                                                lpfc_sli4_unreg_rpi_cmpl_clr;
@@ -4695,14 +4695,14 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
                                "0280 lpfc_cleanup_node: ndlp:x%p "
                                "usgmap:x%x refcnt:%d\n",
                                (void *)ndlp, ndlp->nlp_usg_map,
-                               atomic_read(&ndlp->kref.refcount));
+                               kref_read(&ndlp->kref));
                lpfc_dequeue_node(vport, ndlp);
        } else {
                lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
                                "0281 lpfc_cleanup_node: ndlp:x%p "
                                "usgmap:x%x refcnt:%d\n",
                                (void *)ndlp, ndlp->nlp_usg_map,
-                               atomic_read(&ndlp->kref.refcount));
+                               kref_read(&ndlp->kref));
                lpfc_disable_node(vport, ndlp);
        }
 
@@ -4791,7 +4791,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
                lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
                                 "0005 rpi:%x DID:%x flg:%x %d map:%x %p\n",
                                 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
-                                atomic_read(&ndlp->kref.refcount),
+                                kref_read(&ndlp->kref),
                                 ndlp->nlp_usg_map, ndlp);
                if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
                        != NULL) {
@@ -5557,7 +5557,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
                         "0004 rpi:%x DID:%x flg:%x %d map:%x %p\n",
                         ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
-                        atomic_read(&ndlp->kref.refcount),
+                        kref_read(&ndlp->kref),
                         ndlp->nlp_usg_map, ndlp);
        /*
         * Start issuing Fabric-Device Management Interface (FDMI) command to
@@ -5728,7 +5728,7 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                                 "0007 rpi:%x DID:%x flg:%x refcnt:%d "
                                 "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
                                 ndlp->nlp_flag,
-                                atomic_read(&ndlp->kref.refcount),
+                                kref_read(&ndlp->kref),
                                 ndlp->nlp_usg_map, ndlp);
 
                ndlp->active_rrqs_xri_bitmap =
@@ -5767,7 +5767,7 @@ lpfc_nlp_release(struct kref *kref)
                        "0279 lpfc_nlp_release: ndlp:x%p did %x "
                        "usgmap:x%x refcnt:%d rpi:%x\n",
                        (void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
-                       atomic_read(&ndlp->kref.refcount), ndlp->nlp_rpi);
+                       kref_read(&ndlp->kref), ndlp->nlp_rpi);
 
        /* remove ndlp from action. */
        lpfc_nlp_remove(ndlp->vport, ndlp);
@@ -5804,7 +5804,7 @@ lpfc_nlp_get(struct lpfc_nodelist *ndlp)
                lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
                        "node get:        did:x%x flg:x%x refcnt:x%x",
                        ndlp->nlp_DID, ndlp->nlp_flag,
-                       atomic_read(&ndlp->kref.refcount));
+                       kref_read(&ndlp->kref));
                /* The check of ndlp usage to prevent incrementing the
                 * ndlp reference count that is in the process of being
                 * released.
@@ -5817,7 +5817,7 @@ lpfc_nlp_get(struct lpfc_nodelist *ndlp)
                                "0276 lpfc_nlp_get: ndlp:x%p "
                                "usgmap:x%x refcnt:%d\n",
                                (void *)ndlp, ndlp->nlp_usg_map,
-                               atomic_read(&ndlp->kref.refcount));
+                               kref_read(&ndlp->kref));
                        return NULL;
                } else
                        kref_get(&ndlp->kref);
@@ -5844,7 +5844,7 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
        lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
        "node put:        did:x%x flg:x%x refcnt:x%x",
                ndlp->nlp_DID, ndlp->nlp_flag,
-               atomic_read(&ndlp->kref.refcount));
+               kref_read(&ndlp->kref));
        phba = ndlp->phba;
        spin_lock_irqsave(&phba->ndlp_lock, flags);
        /* Check the ndlp memory free acknowledge flag to avoid the
@@ -5857,7 +5857,7 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
                                "0274 lpfc_nlp_put: ndlp:x%p "
                                "usgmap:x%x refcnt:%d\n",
                                (void *)ndlp, ndlp->nlp_usg_map,
-                               atomic_read(&ndlp->kref.refcount));
+                               kref_read(&ndlp->kref));
                return 1;
        }
        /* Check the ndlp inactivate log flag to avoid the possible
@@ -5870,7 +5870,7 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
                                "0275 lpfc_nlp_put: ndlp:x%p "
                                "usgmap:x%x refcnt:%d\n",
                                (void *)ndlp, ndlp->nlp_usg_map,
-                               atomic_read(&ndlp->kref.refcount));
+                               kref_read(&ndlp->kref));
                return 1;
        }
        /* For last put, mark the ndlp usage flags to make sure no
@@ -5878,7 +5878,7 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
         * in between the process when the final kref_put has been
         * invoked on this ndlp.
         */
-       if (atomic_read(&ndlp->kref.refcount) == 1) {
+       if (kref_read(&ndlp->kref) == 1) {
                /* Indicate ndlp is put to inactive state. */
                NLP_SET_IACT_REQ(ndlp);
                /* Acknowledge ndlp memory free has been seen. */
@@ -5906,8 +5906,8 @@ lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
        lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
                "node not used:   did:x%x flg:x%x refcnt:x%x",
                ndlp->nlp_DID, ndlp->nlp_flag,
-               atomic_read(&ndlp->kref.refcount));
-       if (atomic_read(&ndlp->kref.refcount) == 1)
+               kref_read(&ndlp->kref));
+       if (kref_read(&ndlp->kref) == 1)
                if (lpfc_nlp_put(ndlp))
                        return 1;
        return 0;
index 4776fd85514f5886a195d409c25fca6f389468da..64717c171b15576a94726e76059f434483c3df1d 100644 (file)
@@ -2660,8 +2660,7 @@ lpfc_cleanup(struct lpfc_vport *vport)
                                                "usgmap:x%x refcnt:%d\n",
                                                ndlp->nlp_DID, (void *)ndlp,
                                                ndlp->nlp_usg_map,
-                                               atomic_read(
-                                                       &ndlp->kref.refcount));
+                                               kref_read(&ndlp->kref));
                        }
                        break;
                }
index 75f3fce1c86773299704347fc0960fb5148ea53f..0b5b423b1db0d19a1a1dfb6fc38e168af4559d71 100644 (file)
@@ -51,6 +51,7 @@
 #include <linux/workqueue.h>
 #include <linux/delay.h>
 #include <linux/pci.h>
+#include <linux/pci-aspm.h>
 #include <linux/interrupt.h>
 #include <linux/aer.h>
 #include <linux/raid_class.h>
@@ -4657,6 +4658,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
        struct MPT3SAS_DEVICE *sas_device_priv_data;
        u32 response_code = 0;
        unsigned long flags;
+       unsigned int sector_sz;
 
        mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
        scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
@@ -4715,6 +4717,20 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
        }
 
        xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
+
+       /* In case of bogus fw or device, we could end up having
+        * unaligned partial completion. We can force alignment here,
+        * then scsi-ml does not need to handle this misbehavior.
+        */
+       sector_sz = scmd->device->sector_size;
+       if (unlikely(scmd->request->cmd_type == REQ_TYPE_FS && sector_sz &&
+                    xfer_cnt % sector_sz)) {
+               sdev_printk(KERN_INFO, scmd->device,
+                   "unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n",
+                           xfer_cnt, sector_sz);
+               xfer_cnt = round_down(xfer_cnt, sector_sz);
+       }
+
        scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
        if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
                log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
@@ -8746,6 +8762,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        switch (hba_mpi_version) {
        case MPI2_VERSION:
+               pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
+                       PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
                /* Use mpt2sas driver host template for SAS 2.0 HBA's */
                shost = scsi_host_alloc(&mpt2sas_driver_template,
                  sizeof(struct MPT3SAS_ADAPTER));
index dc88a09f9043c9359cba9c276e523571235c1b50..a94b0b6bd0306379b4707b29be1347d1ce89e6b8 100644 (file)
@@ -3242,7 +3242,7 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
         * from a probe failure context.
         */
        if (!ha->rsp_q_map || !ha->rsp_q_map[0])
-               return;
+               goto free_irqs;
        rsp = ha->rsp_q_map[0];
 
        if (ha->flags.msix_enabled) {
@@ -3262,6 +3262,7 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
                free_irq(pci_irq_vector(ha->pdev, 0), rsp);
        }
 
+free_irqs:
        pci_free_irq_vectors(ha->pdev);
 }
 
index 0a000ecf0881411d4c01c1a95245d1eb9d9da771..40660461a4b5c3e56e61b124385ee044547a4741 100644 (file)
@@ -1616,7 +1616,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
                                /* Don't abort commands in adapter during EEH
                                 * recovery as it's not accessible/responding.
                                 */
-                               if (!ha->flags.eeh_busy) {
+                               if (GET_CMD_SP(sp) && !ha->flags.eeh_busy) {
                                        /* Get a reference to the sp and drop the lock.
                                         * The reference ensures this sp->done() call
                                         * - and not the call in qla2xxx_eh_abort() -
index d925910be761dfcdc61c5c3e97bc98c4372a6cb7..3084983c128720588390a1d6e618ce1c774f8bc5 100644 (file)
@@ -371,7 +371,7 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
                 */
                pr_debug("write_pending aborted cmd[%p] refcount %d "
                        "transport_state %x, t_state %x, se_cmd_flags %x\n",
-                       cmd,cmd->se_cmd.cmd_kref.refcount.counter,
+                       cmd, kref_read(&cmd->se_cmd.cmd_kref),
                        cmd->se_cmd.transport_state,
                        cmd->se_cmd.t_state,
                        cmd->se_cmd.se_cmd_flags);
@@ -584,7 +584,7 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
                 */
                pr_debug("queue_data_in aborted cmd[%p] refcount %d "
                        "transport_state %x, t_state %x, se_cmd_flags %x\n",
-                       cmd,cmd->se_cmd.cmd_kref.refcount.counter,
+                       cmd, kref_read(&cmd->se_cmd.cmd_kref),
                        cmd->se_cmd.transport_state,
                        cmd->se_cmd.t_state,
                        cmd->se_cmd.se_cmd_flags);
index e9e1e141af9cd287bcca730d05a7a62d58fb644a..78db07fd8055df135dcc5488a06c443fc4b51ac8 100644 (file)
@@ -1040,7 +1040,8 @@ int scsi_init_io(struct scsi_cmnd *cmd)
        bool is_mq = (rq->mq_ctx != NULL);
        int error;
 
-       BUG_ON(!blk_rq_nr_phys_segments(rq));
+       if (WARN_ON_ONCE(!blk_rq_nr_phys_segments(rq)))
+               return -EINVAL;
 
        error = scsi_init_sgtable(rq, &cmd->sdb);
        if (error)
index 0b09638fa39be80768701f991cceccc1bee9988f..1f5d92a25a49dd0f928c194c5d64372fb2b071f6 100644 (file)
@@ -836,6 +836,7 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
        struct bio *bio = rq->bio;
        sector_t sector = blk_rq_pos(rq);
        unsigned int nr_sectors = blk_rq_sectors(rq);
+       unsigned int nr_bytes = blk_rq_bytes(rq);
        int ret;
 
        if (sdkp->device->no_write_same)
@@ -868,7 +869,21 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
 
        cmd->transfersize = sdp->sector_size;
        cmd->allowed = SD_MAX_RETRIES;
-       return scsi_init_io(cmd);
+
+       /*
+        * For WRITE SAME the data transferred via the DATA OUT buffer is
+        * different from the amount of data actually written to the target.
+        *
+        * We set up __data_len to the amount of data transferred via the
+        * DATA OUT buffer so that blk_rq_map_sg sets up the proper S/G list
+        * to transfer a single sector of data first, but then reset it to
+        * the amount of data to be written right after so that the I/O path
+        * knows how much to actually write.
+        */
+       rq->__data_len = sdp->sector_size;
+       ret = scsi_init_io(cmd);
+       rq->__data_len = nr_bytes;
+       return ret;
 }
 
 static int sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
index dbe5b4b95df0d9d317dbdc2261914e4d8771991f..121de0aaa6adaa1e9a71376893072c1f69ddc228 100644 (file)
@@ -1753,6 +1753,10 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
                        return res;
 
                iov_iter_truncate(&i, hp->dxfer_len);
+               if (!iov_iter_count(&i)) {
+                       kfree(iov);
+                       return -EINVAL;
+               }
 
                res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC);
                kfree(iov);
index ec91bd07f00a307337283cbea6f72c6e370a0170..c680d76413116c00b80193f5e7db9de2e13441b1 100644 (file)
@@ -534,7 +534,9 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
 {
        struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
        struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
+       unsigned long flags;
        int req_size;
+       int ret;
 
        BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
 
@@ -562,8 +564,15 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
                req_size = sizeof(cmd->req.cmd);
        }
 
-       if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0)
+       ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd));
+       if (ret == -EIO) {
+               cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
+               spin_lock_irqsave(&req_vq->vq_lock, flags);
+               virtscsi_complete_cmd(vscsi, cmd);
+               spin_unlock_irqrestore(&req_vq->vq_lock, flags);
+       } else if (ret != 0) {
                return SCSI_MLQUEUE_HOST_BUSY;
+       }
        return 0;
 }
 
index 2922a9908302d84781f63d1b091ca6e4ddb2eba8..25ae7f2e44b51717251c068e3a9f85288c614093 100644 (file)
@@ -162,7 +162,8 @@ config SPI_BCM63XX_HSSPI
 
 config SPI_BCM_QSPI
        tristate "Broadcom BSPI and MSPI controller support"
-       depends on ARCH_BRCMSTB || ARCH_BCM || ARCH_BCM_IPROC || COMPILE_TEST
+       depends on ARCH_BRCMSTB || ARCH_BCM || ARCH_BCM_IPROC || \
+                       BMIPS_GENERIC || COMPILE_TEST
        default ARCH_BCM_IPROC
        help
          Enables support for the Broadcom SPI flash and MSPI controller.
@@ -263,7 +264,7 @@ config SPI_EP93XX
          mode.
 
 config SPI_FALCON
-       tristate "Falcon SPI controller support"
+       bool "Falcon SPI controller support"
        depends on SOC_FALCON
        help
          The external bus unit (EBU) found on the FALC-ON SoC has SPI
@@ -416,6 +417,14 @@ config SPI_NUC900
        help
          SPI driver for Nuvoton NUC900 series ARM SoCs
 
+config SPI_LANTIQ_SSC
+       tristate "Lantiq SSC SPI controller"
+       depends on LANTIQ || COMPILE_TEST
+       help
+         This driver supports the Lantiq SSC SPI controller in master
+         mode. This controller is found on Intel (former Lantiq) SoCs like
+         the Danube, Falcon, xRX200, xRX300.
+
 config SPI_OC_TINY
        tristate "OpenCores tiny SPI"
        depends on GPIOLIB || COMPILE_TEST
index 7a6b64662c82c71106f3af94a61190ba1f98a218..b375a7a892160b76b5bf62a7f66f86484f300ed1 100644 (file)
@@ -49,6 +49,7 @@ obj-$(CONFIG_SPI_FSL_SPI)             += spi-fsl-spi.o
 obj-$(CONFIG_SPI_GPIO)                 += spi-gpio.o
 obj-$(CONFIG_SPI_IMG_SPFI)             += spi-img-spfi.o
 obj-$(CONFIG_SPI_IMX)                  += spi-imx.o
+obj-$(CONFIG_SPI_LANTIQ_SSC)           += spi-lantiq-ssc.o
 obj-$(CONFIG_SPI_JCORE)                        += spi-jcore.o
 obj-$(CONFIG_SPI_LM70_LLP)             += spi-lm70llp.o
 obj-$(CONFIG_SPI_LP8841_RTC)           += spi-lp8841-rtc.o
index 0314c6b9e04415b0cb792d8e9a4048a6311fd97d..6c7d7a460689917d577973f7bb47a1e408470210 100644 (file)
@@ -170,12 +170,12 @@ static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
        val &= ~(A3700_SPI_DATA_PIN0 | A3700_SPI_DATA_PIN1);
 
        switch (pin_mode) {
-       case 1:
+       case SPI_NBITS_SINGLE:
                break;
-       case 2:
+       case SPI_NBITS_DUAL:
                val |= A3700_SPI_DATA_PIN0;
                break;
-       case 4:
+       case SPI_NBITS_QUAD:
                val |= A3700_SPI_DATA_PIN1;
                break;
        default:
@@ -340,8 +340,7 @@ static irqreturn_t a3700_spi_interrupt(int irq, void *dev_id)
        spireg_write(a3700_spi, A3700_SPI_INT_STAT_REG, cause);
 
        /* Wake up the transfer */
-       if (a3700_spi->wait_mask & cause)
-               complete(&a3700_spi->done);
+       complete(&a3700_spi->done);
 
        return IRQ_HANDLED;
 }
@@ -421,7 +420,7 @@ static void a3700_spi_fifo_thres_set(struct a3700_spi *a3700_spi,
 }
 
 static void a3700_spi_transfer_setup(struct spi_device *spi,
-                                   struct spi_transfer *xfer)
+                                    struct spi_transfer *xfer)
 {
        struct a3700_spi *a3700_spi;
        unsigned int byte_len;
@@ -562,6 +561,7 @@ static int a3700_spi_fifo_read(struct a3700_spi *a3700_spi)
                val = spireg_read(a3700_spi, A3700_SPI_DATA_IN_REG);
                if (a3700_spi->buf_len >= 4) {
                        u32 data = le32_to_cpu(val);
+
                        memcpy(a3700_spi->rx_buf, &data, 4);
 
                        a3700_spi->buf_len -= 4;
@@ -901,7 +901,6 @@ static int a3700_spi_remove(struct platform_device *pdev)
        struct a3700_spi *spi = spi_master_get_devdata(master);
 
        clk_unprepare(spi->clk);
-       spi_master_put(master);
 
        return 0;
 }
@@ -909,7 +908,6 @@ static int a3700_spi_remove(struct platform_device *pdev)
 static struct platform_driver a3700_spi_driver = {
        .driver = {
                .name   = DRIVER_NAME,
-               .owner  = THIS_MODULE,
                .of_match_table = of_match_ptr(a3700_spi_dt_ids),
        },
        .probe          = a3700_spi_probe,
index f369174fbd886fbbc09a13af5f9dbce66d498635..b89cee11f41815be61876bb9206e59cb4d6714b1 100644 (file)
@@ -78,14 +78,16 @@ static void ath79_spi_chipselect(struct spi_device *spi, int is_active)
                ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
        }
 
-       if (spi->chip_select) {
+       if (gpio_is_valid(spi->cs_gpio)) {
                /* SPI is normally active-low */
-               gpio_set_value(spi->cs_gpio, cs_high);
+               gpio_set_value_cansleep(spi->cs_gpio, cs_high);
        } else {
+               u32 cs_bit = AR71XX_SPI_IOC_CS(spi->chip_select);
+
                if (cs_high)
-                       sp->ioc_base |= AR71XX_SPI_IOC_CS0;
+                       sp->ioc_base |= cs_bit;
                else
-                       sp->ioc_base &= ~AR71XX_SPI_IOC_CS0;
+                       sp->ioc_base &= ~cs_bit;
 
                ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
        }
@@ -118,11 +120,8 @@ static int ath79_spi_setup_cs(struct spi_device *spi)
        struct ath79_spi *sp = ath79_spidev_to_sp(spi);
        int status;
 
-       if (spi->chip_select && !gpio_is_valid(spi->cs_gpio))
-               return -EINVAL;
-
        status = 0;
-       if (spi->chip_select) {
+       if (gpio_is_valid(spi->cs_gpio)) {
                unsigned long flags;
 
                flags = GPIOF_DIR_OUT;
@@ -134,10 +133,12 @@ static int ath79_spi_setup_cs(struct spi_device *spi)
                status = gpio_request_one(spi->cs_gpio, flags,
                                          dev_name(&spi->dev));
        } else {
+               u32 cs_bit = AR71XX_SPI_IOC_CS(spi->chip_select);
+
                if (spi->mode & SPI_CS_HIGH)
-                       sp->ioc_base &= ~AR71XX_SPI_IOC_CS0;
+                       sp->ioc_base &= ~cs_bit;
                else
-                       sp->ioc_base |= AR71XX_SPI_IOC_CS0;
+                       sp->ioc_base |= cs_bit;
 
                ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
        }
@@ -147,7 +148,7 @@ static int ath79_spi_setup_cs(struct spi_device *spi)
 
 static void ath79_spi_cleanup_cs(struct spi_device *spi)
 {
-       if (spi->chip_select) {
+       if (gpio_is_valid(spi->cs_gpio)) {
                gpio_free(spi->cs_gpio);
        }
 }
index 14f9dea3173fc5fd9298c7188bf836a3a95f383d..b19722ba908c1be3855e42d21b25f2e6fb38eae8 100644 (file)
@@ -89,7 +89,7 @@
 #define BSPI_BPP_MODE_SELECT_MASK              BIT(8)
 #define BSPI_BPP_ADDR_SELECT_MASK              BIT(16)
 
-#define BSPI_READ_LENGTH                       256
+#define BSPI_READ_LENGTH                       512
 
 /* MSPI register offsets */
 #define MSPI_SPCR0_LSB                         0x000
@@ -192,9 +192,11 @@ struct bcm_qspi_dev_id {
        void *dev;
 };
 
+
 struct qspi_trans {
        struct spi_transfer *trans;
        int byte;
+       bool mspi_last_trans;
 };
 
 struct bcm_qspi {
@@ -371,7 +373,7 @@ static int bcm_qspi_bspi_set_flex_mode(struct bcm_qspi *qspi, int width,
                        /* default mode, does not need flex_cmd */
                        flex_mode = 0;
                else
-                       command = SPINOR_OP_READ4_FAST;
+                       command = SPINOR_OP_READ_FAST_4B;
                break;
        case SPI_NBITS_DUAL:
                bpc = 0x00000001;
@@ -384,7 +386,7 @@ static int bcm_qspi_bspi_set_flex_mode(struct bcm_qspi *qspi, int width,
                } else {
                        command = SPINOR_OP_READ_1_1_2;
                        if (spans_4byte)
-                               command = SPINOR_OP_READ4_1_1_2;
+                               command = SPINOR_OP_READ_1_1_2_4B;
                }
                break;
        case SPI_NBITS_QUAD:
@@ -399,7 +401,7 @@ static int bcm_qspi_bspi_set_flex_mode(struct bcm_qspi *qspi, int width,
                } else {
                        command = SPINOR_OP_READ_1_1_4;
                        if (spans_4byte)
-                               command = SPINOR_OP_READ4_1_1_4;
+                               command = SPINOR_OP_READ_1_1_4_4B;
                }
                break;
        default:
@@ -616,6 +618,16 @@ static int bcm_qspi_setup(struct spi_device *spi)
        return 0;
 }
 
+static bool bcm_qspi_mspi_transfer_is_last(struct bcm_qspi *qspi,
+                                          struct qspi_trans *qt)
+{
+       if (qt->mspi_last_trans &&
+           spi_transfer_is_last(qspi->master, qt->trans))
+               return true;
+       else
+               return false;
+}
+
 static int update_qspi_trans_byte_count(struct bcm_qspi *qspi,
                                        struct qspi_trans *qt, int flags)
 {
@@ -629,7 +641,6 @@ static int update_qspi_trans_byte_count(struct bcm_qspi *qspi,
 
        if (qt->byte >= qt->trans->len) {
                /* we're at the end of the spi_transfer */
-
                /* in TX mode, need to pause for a delay or CS change */
                if (qt->trans->delay_usecs &&
                    (flags & TRANS_STATUS_BREAK_DELAY))
@@ -641,7 +652,7 @@ static int update_qspi_trans_byte_count(struct bcm_qspi *qspi,
                        goto done;
 
                dev_dbg(&qspi->pdev->dev, "advance msg exit\n");
-               if (spi_transfer_is_last(qspi->master, qt->trans))
+               if (bcm_qspi_mspi_transfer_is_last(qspi, qt))
                        ret = TRANS_STATUS_BREAK_EOM;
                else
                        ret = TRANS_STATUS_BREAK_NO_BYTES;
@@ -813,7 +824,7 @@ static int bcm_qspi_bspi_flash_read(struct spi_device *spi,
                                    struct spi_flash_read_message *msg)
 {
        struct bcm_qspi *qspi = spi_master_get_devdata(spi->master);
-       u32 addr = 0, len, len_words;
+       u32 addr = 0, len, rdlen, len_words;
        int ret = 0;
        unsigned long timeo = msecs_to_jiffies(100);
        struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
@@ -826,7 +837,7 @@ static int bcm_qspi_bspi_flash_read(struct spi_device *spi,
        bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);
 
        /*
-        * when using flex mode mode we need to send
+        * when using flex mode we need to send
         * the upper address byte to bspi
         */
        if (bcm_qspi_bspi_ver_three(qspi) == false) {
@@ -840,48 +851,127 @@ static int bcm_qspi_bspi_flash_read(struct spi_device *spi,
        else
                addr = msg->from & 0x00ffffff;
 
-       /* set BSPI RAF buffer max read length */
-       len = msg->len;
-       if (len > BSPI_READ_LENGTH)
-               len = BSPI_READ_LENGTH;
-
        if (bcm_qspi_bspi_ver_three(qspi) == true)
                addr = (addr + 0xc00000) & 0xffffff;
 
-       reinit_completion(&qspi->bspi_done);
-       bcm_qspi_enable_bspi(qspi);
-       len_words = (len + 3) >> 2;
-       qspi->bspi_rf_msg = msg;
-       qspi->bspi_rf_msg_status = 0;
+       /*
+        * read into the entire buffer by breaking the reads
+        * into RAF buffer read lengths
+        */
+       len = msg->len;
        qspi->bspi_rf_msg_idx = 0;
-       qspi->bspi_rf_msg_len = len;
-       dev_dbg(&qspi->pdev->dev, "bspi xfr addr 0x%x len 0x%x", addr, len);
 
-       bcm_qspi_write(qspi, BSPI, BSPI_RAF_START_ADDR, addr);
-       bcm_qspi_write(qspi, BSPI, BSPI_RAF_NUM_WORDS, len_words);
-       bcm_qspi_write(qspi, BSPI, BSPI_RAF_WATERMARK, 0);
+       do {
+               if (len > BSPI_READ_LENGTH)
+                       rdlen = BSPI_READ_LENGTH;
+               else
+                       rdlen = len;
+
+               reinit_completion(&qspi->bspi_done);
+               bcm_qspi_enable_bspi(qspi);
+               len_words = (rdlen + 3) >> 2;
+               qspi->bspi_rf_msg = msg;
+               qspi->bspi_rf_msg_status = 0;
+               qspi->bspi_rf_msg_len = rdlen;
+               dev_dbg(&qspi->pdev->dev,
+                       "bspi xfr addr 0x%x len 0x%x", addr, rdlen);
+               bcm_qspi_write(qspi, BSPI, BSPI_RAF_START_ADDR, addr);
+               bcm_qspi_write(qspi, BSPI, BSPI_RAF_NUM_WORDS, len_words);
+               bcm_qspi_write(qspi, BSPI, BSPI_RAF_WATERMARK, 0);
+               if (qspi->soc_intc) {
+                       /*
+                        * clear soc MSPI and BSPI interrupts and enable
+                        * BSPI interrupts.
+                        */
+                       soc_intc->bcm_qspi_int_ack(soc_intc, MSPI_BSPI_DONE);
+                       soc_intc->bcm_qspi_int_set(soc_intc, BSPI_DONE, true);
+               }
 
-       if (qspi->soc_intc) {
-               /*
-                * clear soc MSPI and BSPI interrupts and enable
-                * BSPI interrupts.
-                */
-               soc_intc->bcm_qspi_int_ack(soc_intc, MSPI_BSPI_DONE);
-               soc_intc->bcm_qspi_int_set(soc_intc, BSPI_DONE, true);
+               /* Must flush previous writes before starting BSPI operation */
+               mb();
+               bcm_qspi_bspi_lr_start(qspi);
+               if (!wait_for_completion_timeout(&qspi->bspi_done, timeo)) {
+                       dev_err(&qspi->pdev->dev, "timeout waiting for BSPI\n");
+                       ret = -ETIMEDOUT;
+                       break;
+               }
+
+               /* set msg return length */
+               msg->retlen += rdlen;
+               addr += rdlen;
+               len -= rdlen;
+       } while (len);
+
+       return ret;
+}
+
+static int bcm_qspi_transfer_one(struct spi_master *master,
+                                struct spi_device *spi,
+                                struct spi_transfer *trans)
+{
+       struct bcm_qspi *qspi = spi_master_get_devdata(master);
+       int slots;
+       unsigned long timeo = msecs_to_jiffies(100);
+
+       bcm_qspi_chip_select(qspi, spi->chip_select);
+       qspi->trans_pos.trans = trans;
+       qspi->trans_pos.byte = 0;
+
+       while (qspi->trans_pos.byte < trans->len) {
+               reinit_completion(&qspi->mspi_done);
+
+               slots = write_to_hw(qspi, spi);
+               if (!wait_for_completion_timeout(&qspi->mspi_done, timeo)) {
+                       dev_err(&qspi->pdev->dev, "timeout waiting for MSPI\n");
+                       return -ETIMEDOUT;
+               }
+
+               read_from_hw(qspi, slots);
        }
 
-       /* Must flush previous writes before starting BSPI operation */
-       mb();
+       return 0;
+}
 
-       bcm_qspi_bspi_lr_start(qspi);
-       if (!wait_for_completion_timeout(&qspi->bspi_done, timeo)) {
-               dev_err(&qspi->pdev->dev, "timeout waiting for BSPI\n");
-               ret = -ETIMEDOUT;
-       } else {
-               /* set the return length for the caller */
-               msg->retlen = len;
+static int bcm_qspi_mspi_flash_read(struct spi_device *spi,
+                                   struct spi_flash_read_message *msg)
+{
+       struct bcm_qspi *qspi = spi_master_get_devdata(spi->master);
+       struct spi_transfer t[2];
+       u8 cmd[6];
+       int ret;
+
+       memset(cmd, 0, sizeof(cmd));
+       memset(t, 0, sizeof(t));
+
+       /* tx */
+       /* opcode is in cmd[0] */
+       cmd[0] = msg->read_opcode;
+       cmd[1] = msg->from >> (msg->addr_width * 8 -  8);
+       cmd[2] = msg->from >> (msg->addr_width * 8 - 16);
+       cmd[3] = msg->from >> (msg->addr_width * 8 - 24);
+       cmd[4] = msg->from >> (msg->addr_width * 8 - 32);
+       t[0].tx_buf = cmd;
+       t[0].len = msg->addr_width + msg->dummy_bytes + 1;
+       t[0].bits_per_word = spi->bits_per_word;
+       t[0].tx_nbits = msg->opcode_nbits;
+       /* lets mspi know that this is not last transfer */
+       qspi->trans_pos.mspi_last_trans = false;
+       ret = bcm_qspi_transfer_one(spi->master, spi, &t[0]);
+
+       /* rx */
+       qspi->trans_pos.mspi_last_trans = true;
+       if (!ret) {
+               /* rx */
+               t[1].rx_buf = msg->buf;
+               t[1].len = msg->len;
+               t[1].rx_nbits =  msg->data_nbits;
+               t[1].bits_per_word = spi->bits_per_word;
+               ret = bcm_qspi_transfer_one(spi->master, spi, &t[1]);
        }
 
+       if (!ret)
+               msg->retlen = msg->len;
+
        return ret;
 }
 
@@ -918,8 +1008,7 @@ static int bcm_qspi_flash_read(struct spi_device *spi,
                mspi_read = true;
 
        if (mspi_read)
-               /* this will make the m25p80 read to fallback to mspi read */
-               return -EAGAIN;
+               return bcm_qspi_mspi_flash_read(spi, msg);
 
        io_width = msg->data_nbits ? msg->data_nbits : SPI_NBITS_SINGLE;
        addrlen = msg->addr_width;
@@ -931,33 +1020,6 @@ static int bcm_qspi_flash_read(struct spi_device *spi,
        return ret;
 }
 
-static int bcm_qspi_transfer_one(struct spi_master *master,
-                                struct spi_device *spi,
-                                struct spi_transfer *trans)
-{
-       struct bcm_qspi *qspi = spi_master_get_devdata(master);
-       int slots;
-       unsigned long timeo = msecs_to_jiffies(100);
-
-       bcm_qspi_chip_select(qspi, spi->chip_select);
-       qspi->trans_pos.trans = trans;
-       qspi->trans_pos.byte = 0;
-
-       while (qspi->trans_pos.byte < trans->len) {
-               reinit_completion(&qspi->mspi_done);
-
-               slots = write_to_hw(qspi, spi);
-               if (!wait_for_completion_timeout(&qspi->mspi_done, timeo)) {
-                       dev_err(&qspi->pdev->dev, "timeout waiting for MSPI\n");
-                       return -ETIMEDOUT;
-               }
-
-               read_from_hw(qspi, slots);
-       }
-
-       return 0;
-}
-
 static void bcm_qspi_cleanup(struct spi_device *spi)
 {
        struct bcm_qspi_parms *xp = spi_get_ctldata(spi);
@@ -1187,6 +1249,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
        qspi->pdev = pdev;
        qspi->trans_pos.trans = NULL;
        qspi->trans_pos.byte = 0;
+       qspi->trans_pos.mspi_last_trans = true;
        qspi->master = master;
 
        master->bus_num = -1;
@@ -1345,7 +1408,6 @@ int bcm_qspi_remove(struct platform_device *pdev)
 {
        struct bcm_qspi *qspi = platform_get_drvdata(pdev);
 
-       platform_set_drvdata(pdev, NULL);
        bcm_qspi_hw_uninit(qspi);
        clk_disable_unprepare(qspi->clk);
        kfree(qspi->dev_ids);
index afb51699dbb5a6953821189ed03a9bfbfa8d9b7a..6e409eabe1c938224b9907f0fbe3fe86679eeb98 100644 (file)
@@ -1,3 +1,11 @@
+/*
+ * Copyright (C) 2014-2016 RafaÅ‚ MiÅ‚ecki <rafal@milecki.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
 #define pr_fmt(fmt)            KBUILD_MODNAME ": " fmt
 
 #include <linux/kernel.h>
@@ -275,10 +283,6 @@ static int bcm53xxspi_flash_read(struct spi_device *spi,
  * BCMA
  **************************************************/
 
-static struct spi_board_info bcm53xx_info = {
-       .modalias       = "bcm53xxspiflash",
-};
-
 static const struct bcma_device_id bcm53xxspi_bcma_tbl[] = {
        BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_QSPI, BCMA_ANY_REV, BCMA_ANY_CLASS),
        {},
@@ -311,6 +315,7 @@ static int bcm53xxspi_bcma_probe(struct bcma_device *core)
        b53spi->bspi = true;
        bcm53xxspi_disable_bspi(b53spi);
 
+       master->dev.of_node = dev->of_node;
        master->transfer_one = bcm53xxspi_transfer_one;
        if (b53spi->mmio_base)
                master->spi_flash_read = bcm53xxspi_flash_read;
@@ -324,9 +329,6 @@ static int bcm53xxspi_bcma_probe(struct bcma_device *core)
                return err;
        }
 
-       /* Broadcom SoCs (at least with the CC rev 42) use SPI for flash only */
-       spi_new_device(master, &bcm53xx_info);
-
        return 0;
 }
 
@@ -361,4 +363,4 @@ module_exit(bcm53xxspi_module_exit);
 
 MODULE_DESCRIPTION("Broadcom BCM53xx SPI Controller driver");
 MODULE_AUTHOR("RafaÅ‚ MiÅ‚ecki <zajec5@gmail.com>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
index 054012f875671b995141f8c549021389f21f454f..b217c22ff72fe9a2284f59f8c1cd62aca58680aa 100644 (file)
@@ -107,9 +107,9 @@ static const struct file_operations dw_spi_regs_ops = {
 
 static int dw_spi_debugfs_init(struct dw_spi *dws)
 {
-       char name[128];
+       char name[32];
 
-       snprintf(name, 128, "dw_spi-%s", dev_name(&dws->master->dev));
+       snprintf(name, 32, "dw_spi%d", dws->master->bus_num);
        dws->debugfs = debugfs_create_dir(name, NULL);
        if (!dws->debugfs)
                return -ENOMEM;
@@ -486,9 +486,9 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
        dws->type = SSI_MOTO_SPI;
        dws->dma_inited = 0;
        dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);
-       snprintf(dws->name, sizeof(dws->name), "dw_spi%d", dws->bus_num);
 
-       ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dws->name, master);
+       ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dev_name(dev),
+                         master);
        if (ret < 0) {
                dev_err(dev, "can not get IRQ\n");
                goto err_free_master;
index c21ca02f8ec5fcb1d6ca74ee15f43028d03044c7..da5eab62df3477dc4ceb66bed98457373bef3fe8 100644 (file)
@@ -101,7 +101,6 @@ struct dw_spi_dma_ops {
 struct dw_spi {
        struct spi_master       *master;
        enum dw_ssi_type        type;
-       char                    name[16];
 
        void __iomem            *regs;
        unsigned long           paddr;
index 17a6387e20b5fb0f9cf673cfb4cc8e4322ea9fed..b5d766064b7b59390d50e888ad0832605f47699f 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/platform_device.h>
 #include <linux/sched.h>
 #include <linux/scatterlist.h>
+#include <linux/gpio.h>
 #include <linux/spi/spi.h>
 
 #include <linux/platform_data/dma-ep93xx.h>
@@ -107,16 +108,6 @@ struct ep93xx_spi {
        void                            *zeropage;
 };
 
-/**
- * struct ep93xx_spi_chip - SPI device hardware settings
- * @spi: back pointer to the SPI device
- * @ops: private chip operations
- */
-struct ep93xx_spi_chip {
-       const struct spi_device         *spi;
-       struct ep93xx_spi_chip_ops      *ops;
-};
-
 /* converts bits per word to CR0.DSS value */
 #define bits_per_word_to_dss(bpw)      ((bpw) - 1)
 
@@ -229,104 +220,36 @@ static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
        return -EINVAL;
 }
 
-static void ep93xx_spi_cs_control(struct spi_device *spi, bool control)
-{
-       struct ep93xx_spi_chip *chip = spi_get_ctldata(spi);
-       int value = (spi->mode & SPI_CS_HIGH) ? control : !control;
-
-       if (chip->ops && chip->ops->cs_control)
-               chip->ops->cs_control(spi, value);
-}
-
-/**
- * ep93xx_spi_setup() - setup an SPI device
- * @spi: SPI device to setup
- *
- * This function sets up SPI device mode, speed etc. Can be called multiple
- * times for a single device. Returns %0 in case of success, negative error in
- * case of failure. When this function returns success, the device is
- * deselected.
- */
-static int ep93xx_spi_setup(struct spi_device *spi)
+static void ep93xx_spi_cs_control(struct spi_device *spi, bool enable)
 {
-       struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
-       struct ep93xx_spi_chip *chip;
+       if (spi->mode & SPI_CS_HIGH)
+               enable = !enable;
 
-       chip = spi_get_ctldata(spi);
-       if (!chip) {
-               dev_dbg(&espi->pdev->dev, "initial setup for %s\n",
-                       spi->modalias);
-
-               chip = kzalloc(sizeof(*chip), GFP_KERNEL);
-               if (!chip)
-                       return -ENOMEM;
-
-               chip->spi = spi;
-               chip->ops = spi->controller_data;
-
-               if (chip->ops && chip->ops->setup) {
-                       int ret = chip->ops->setup(spi);
-
-                       if (ret) {
-                               kfree(chip);
-                               return ret;
-                       }
-               }
-
-               spi_set_ctldata(spi, chip);
-       }
-
-       ep93xx_spi_cs_control(spi, false);
-       return 0;
+       if (gpio_is_valid(spi->cs_gpio))
+               gpio_set_value(spi->cs_gpio, !enable);
 }
 
-/**
- * ep93xx_spi_cleanup() - cleans up master controller specific state
- * @spi: SPI device to cleanup
- *
- * This function releases master controller specific state for given @spi
- * device.
- */
-static void ep93xx_spi_cleanup(struct spi_device *spi)
-{
-       struct ep93xx_spi_chip *chip;
-
-       chip = spi_get_ctldata(spi);
-       if (chip) {
-               if (chip->ops && chip->ops->cleanup)
-                       chip->ops->cleanup(spi);
-               spi_set_ctldata(spi, NULL);
-               kfree(chip);
-       }
-}
-
-/**
- * ep93xx_spi_chip_setup() - configures hardware according to given @chip
- * @espi: ep93xx SPI controller struct
- * @chip: chip specific settings
- * @speed_hz: transfer speed
- * @bits_per_word: transfer bits_per_word
- */
 static int ep93xx_spi_chip_setup(const struct ep93xx_spi *espi,
-                                const struct ep93xx_spi_chip *chip,
-                                u32 speed_hz, u8 bits_per_word)
+                                struct spi_device *spi,
+                                struct spi_transfer *xfer)
 {
-       u8 dss = bits_per_word_to_dss(bits_per_word);
+       u8 dss = bits_per_word_to_dss(xfer->bits_per_word);
        u8 div_cpsr = 0;
        u8 div_scr = 0;
        u16 cr0;
        int err;
 
-       err = ep93xx_spi_calc_divisors(espi, speed_hz, &div_cpsr, &div_scr);
+       err = ep93xx_spi_calc_divisors(espi, xfer->speed_hz,
+                                      &div_cpsr, &div_scr);
        if (err)
                return err;
 
        cr0 = div_scr << SSPCR0_SCR_SHIFT;
-       cr0 |= (chip->spi->mode & (SPI_CPHA|SPI_CPOL)) << SSPCR0_MODE_SHIFT;
+       cr0 |= (spi->mode & (SPI_CPHA | SPI_CPOL)) << SSPCR0_MODE_SHIFT;
        cr0 |= dss;
 
        dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
-               chip->spi->mode, div_cpsr, div_scr, dss);
+               spi->mode, div_cpsr, div_scr, dss);
        dev_dbg(&espi->pdev->dev, "setup: cr0 %#x\n", cr0);
 
        ep93xx_spi_write_u8(espi, SSPCPSR, div_cpsr);
@@ -603,12 +526,11 @@ static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
                                        struct spi_message *msg,
                                        struct spi_transfer *t)
 {
-       struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi);
        int err;
 
        msg->state = t;
 
-       err = ep93xx_spi_chip_setup(espi, chip, t->speed_hz, t->bits_per_word);
+       err = ep93xx_spi_chip_setup(espi, msg->spi, t);
        if (err) {
                dev_err(&espi->pdev->dev,
                        "failed to setup chip for transfer\n");
@@ -863,8 +785,13 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
        struct resource *res;
        int irq;
        int error;
+       int i;
 
        info = dev_get_platdata(&pdev->dev);
+       if (!info) {
+               dev_err(&pdev->dev, "missing platform data\n");
+               return -EINVAL;
+       }
 
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
@@ -882,14 +809,36 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
        if (!master)
                return -ENOMEM;
 
-       master->setup = ep93xx_spi_setup;
        master->transfer_one_message = ep93xx_spi_transfer_one_message;
-       master->cleanup = ep93xx_spi_cleanup;
        master->bus_num = pdev->id;
-       master->num_chipselect = info->num_chipselect;
        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
        master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
 
+       master->num_chipselect = info->num_chipselect;
+       master->cs_gpios = devm_kzalloc(&master->dev,
+                                       sizeof(int) * master->num_chipselect,
+                                       GFP_KERNEL);
+       if (!master->cs_gpios) {
+               error = -ENOMEM;
+               goto fail_release_master;
+       }
+
+       for (i = 0; i < master->num_chipselect; i++) {
+               master->cs_gpios[i] = info->chipselect[i];
+
+               if (!gpio_is_valid(master->cs_gpios[i]))
+                       continue;
+
+               error = devm_gpio_request_one(&pdev->dev, master->cs_gpios[i],
+                                             GPIOF_OUT_INIT_HIGH,
+                                             "ep93xx-spi");
+               if (error) {
+                       dev_err(&pdev->dev, "could not request cs gpio %d\n",
+                               master->cs_gpios[i]);
+                       goto fail_release_master;
+               }
+       }
+
        platform_set_drvdata(pdev, master);
 
        espi = spi_master_get_devdata(master);
index 52551f6d0c7ddf71aaf03ecc18c5caf2f804108b..cb3c73007ca15a79dc4283c8a07d065118cbca24 100644 (file)
@@ -366,7 +366,7 @@ static int fsl_lpspi_transfer_one_msg(struct spi_master *master,
        struct spi_transfer *xfer;
        bool is_first_xfer = true;
        u32 temp;
-       int ret;
+       int ret = 0;
 
        msg->status = 0;
        msg->actual_length = 0;
@@ -512,9 +512,9 @@ static int fsl_lpspi_remove(struct platform_device *pdev)
 
 static struct platform_driver fsl_lpspi_driver = {
        .driver = {
-                  .name = DRIVER_NAME,
-                  .of_match_table = fsl_lpspi_dt_ids,
-                  },
+               .name = DRIVER_NAME,
+               .of_match_table = fsl_lpspi_dt_ids,
+       },
        .probe = fsl_lpspi_probe,
        .remove = fsl_lpspi_remove,
 };
index 8b290d9d7935044640da36688e82fd01e90bf114..0fc3452652aee4fad412d74398ff6dd13f1ceb96 100644 (file)
@@ -267,10 +267,9 @@ static int fsl_spi_setup_transfer(struct spi_device *spi,
        if ((mpc8xxx_spi->spibrg / hz) > 64) {
                cs->hw_mode |= SPMODE_DIV16;
                pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1;
-
-               WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. "
-                         "Will use %d Hz instead.\n", dev_name(&spi->dev),
-                         hz, mpc8xxx_spi->spibrg / 1024);
+               WARN_ONCE(pm > 16,
+                         "%s: Requested speed is too low: %d Hz. Will use %d Hz instead.\n",
+                         dev_name(&spi->dev), hz, mpc8xxx_spi->spibrg / 1024);
                if (pm > 16)
                        pm = 16;
        } else {
@@ -727,12 +726,13 @@ static int of_fsl_spi_get_chipselects(struct device *dev)
                return 0;
        }
 
-       pinfo->gpios = kmalloc(ngpios * sizeof(*pinfo->gpios), GFP_KERNEL);
+       pinfo->gpios = kmalloc_array(ngpios, sizeof(*pinfo->gpios),
+                                    GFP_KERNEL);
        if (!pinfo->gpios)
                return -ENOMEM;
        memset(pinfo->gpios, -1, ngpios * sizeof(*pinfo->gpios));
 
-       pinfo->alow_flags = kzalloc(ngpios * sizeof(*pinfo->alow_flags),
+       pinfo->alow_flags = kcalloc(ngpios, sizeof(*pinfo->alow_flags),
                                    GFP_KERNEL);
        if (!pinfo->alow_flags) {
                ret = -ENOMEM;
@@ -762,8 +762,9 @@ static int of_fsl_spi_get_chipselects(struct device *dev)
                ret = gpio_direction_output(pinfo->gpios[i],
                                            pinfo->alow_flags[i]);
                if (ret) {
-                       dev_err(dev, "can't set output direction for gpio "
-                               "#%d: %d\n", i, ret);
+                       dev_err(dev,
+                               "can't set output direction for gpio #%d: %d\n",
+                               i, ret);
                        goto err_loop;
                }
        }
index 32ced64a5bb9a012e2edd0d415d8c477ebc0522a..9a7c62f471dc8cb2e62638d2846989da076442ae 100644 (file)
@@ -211,7 +211,7 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
                         struct spi_transfer *transfer)
 {
        struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
-       unsigned int bpw;
+       unsigned int bpw, i;
 
        if (!master->dma_rx)
                return false;
@@ -228,12 +228,16 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
        if (bpw != 1 && bpw != 2 && bpw != 4)
                return false;
 
-       if (transfer->len < spi_imx->wml * bpw)
-               return false;
+       for (i = spi_imx_get_fifosize(spi_imx) / 2; i > 0; i--) {
+               if (!(transfer->len % (i * bpw)))
+                       break;
+       }
 
-       if (transfer->len % (spi_imx->wml * bpw))
+       if (i == 0)
                return false;
 
+       spi_imx->wml = i;
+
        return true;
 }
 
@@ -837,10 +841,6 @@ static int spi_imx_dma_configure(struct spi_master *master,
        struct dma_slave_config rx = {}, tx = {};
        struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
 
-       if (bytes_per_word == spi_imx->bytes_per_word)
-               /* Same as last time */
-               return 0;
-
        switch (bytes_per_word) {
        case 4:
                buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
diff --git a/drivers/spi/spi-lantiq-ssc.c b/drivers/spi/spi-lantiq-ssc.c
new file mode 100644 (file)
index 0000000..8a626f7
--- /dev/null
@@ -0,0 +1,983 @@
+/*
+ * Copyright (C) 2011-2015 Daniel Schwierzeck <daniel.schwierzeck@gmail.com>
+ * Copyright (C) 2016 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+
+#ifdef CONFIG_LANTIQ
+#include <lantiq_soc.h>
+#endif
+
+#define SPI_RX_IRQ_NAME                "spi_rx"
+#define SPI_TX_IRQ_NAME                "spi_tx"
+#define SPI_ERR_IRQ_NAME       "spi_err"
+#define SPI_FRM_IRQ_NAME       "spi_frm"
+
+#define SPI_CLC                        0x00
+#define SPI_PISEL              0x04
+#define SPI_ID                 0x08
+#define SPI_CON                        0x10
+#define SPI_STAT               0x14
+#define SPI_WHBSTATE           0x18
+#define SPI_TB                 0x20
+#define SPI_RB                 0x24
+#define SPI_RXFCON             0x30
+#define SPI_TXFCON             0x34
+#define SPI_FSTAT              0x38
+#define SPI_BRT                        0x40
+#define SPI_BRSTAT             0x44
+#define SPI_SFCON              0x60
+#define SPI_SFSTAT             0x64
+#define SPI_GPOCON             0x70
+#define SPI_GPOSTAT            0x74
+#define SPI_FPGO               0x78
+#define SPI_RXREQ              0x80
+#define SPI_RXCNT              0x84
+#define SPI_DMACON             0xec
+#define SPI_IRNEN              0xf4
+#define SPI_IRNICR             0xf8
+#define SPI_IRNCR              0xfc
+
+#define SPI_CLC_SMC_S          16      /* Clock divider for sleep mode */
+#define SPI_CLC_SMC_M          (0xFF << SPI_CLC_SMC_S)
+#define SPI_CLC_RMC_S          8       /* Clock divider for normal run mode */
+#define SPI_CLC_RMC_M          (0xFF << SPI_CLC_RMC_S)
+#define SPI_CLC_DISS           BIT(1)  /* Disable status bit */
+#define SPI_CLC_DISR           BIT(0)  /* Disable request bit */
+
+#define SPI_ID_TXFS_S          24      /* Implemented TX FIFO size */
+#define SPI_ID_TXFS_M          (0x3F << SPI_ID_TXFS_S)
+#define SPI_ID_RXFS_S          16      /* Implemented RX FIFO size */
+#define SPI_ID_RXFS_M          (0x3F << SPI_ID_RXFS_S)
+#define SPI_ID_MOD_S           8       /* Module ID */
+#define SPI_ID_MOD_M           (0xff << SPI_ID_MOD_S)
+#define SPI_ID_CFG_S           5       /* DMA interface support */
+#define SPI_ID_CFG_M           (1 << SPI_ID_CFG_S)
+#define SPI_ID_REV_M           0x1F    /* Hardware revision number */
+
+#define SPI_CON_BM_S           16      /* Data width selection */
+#define SPI_CON_BM_M           (0x1F << SPI_CON_BM_S)
+#define SPI_CON_EM             BIT(24) /* Echo mode */
+#define SPI_CON_IDLE           BIT(23) /* Idle bit value */
+#define SPI_CON_ENBV           BIT(22) /* Enable byte valid control */
+#define SPI_CON_RUEN           BIT(12) /* Receive underflow error enable */
+#define SPI_CON_TUEN           BIT(11) /* Transmit underflow error enable */
+#define SPI_CON_AEN            BIT(10) /* Abort error enable */
+#define SPI_CON_REN            BIT(9)  /* Receive overflow error enable */
+#define SPI_CON_TEN            BIT(8)  /* Transmit overflow error enable */
+#define SPI_CON_LB             BIT(7)  /* Loopback control */
+#define SPI_CON_PO             BIT(6)  /* Clock polarity control */
+#define SPI_CON_PH             BIT(5)  /* Clock phase control */
+#define SPI_CON_HB             BIT(4)  /* Heading control */
+#define SPI_CON_RXOFF          BIT(1)  /* Switch receiver off */
+#define SPI_CON_TXOFF          BIT(0)  /* Switch transmitter off */
+
+#define SPI_STAT_RXBV_S                28
+#define SPI_STAT_RXBV_M                (0x7 << SPI_STAT_RXBV_S)
+#define SPI_STAT_BSY           BIT(13) /* Busy flag */
+#define SPI_STAT_RUE           BIT(12) /* Receive underflow error flag */
+#define SPI_STAT_TUE           BIT(11) /* Transmit underflow error flag */
+#define SPI_STAT_AE            BIT(10) /* Abort error flag */
+#define SPI_STAT_RE            BIT(9)  /* Receive error flag */
+#define SPI_STAT_TE            BIT(8)  /* Transmit error flag */
+#define SPI_STAT_ME            BIT(7)  /* Mode error flag */
+#define SPI_STAT_MS            BIT(1)  /* Master/slave select bit */
+#define SPI_STAT_EN            BIT(0)  /* Enable bit */
+#define SPI_STAT_ERRORS                (SPI_STAT_ME | SPI_STAT_TE | SPI_STAT_RE | \
+                                SPI_STAT_AE | SPI_STAT_TUE | SPI_STAT_RUE)
+
+#define SPI_WHBSTATE_SETTUE    BIT(15) /* Set transmit underflow error flag */
+#define SPI_WHBSTATE_SETAE     BIT(14) /* Set abort error flag */
+#define SPI_WHBSTATE_SETRE     BIT(13) /* Set receive error flag */
+#define SPI_WHBSTATE_SETTE     BIT(12) /* Set transmit error flag */
+#define SPI_WHBSTATE_CLRTUE    BIT(11) /* Clear transmit underflow error flag */
+#define SPI_WHBSTATE_CLRAE     BIT(10) /* Clear abort error flag */
+#define SPI_WHBSTATE_CLRRE     BIT(9)  /* Clear receive error flag */
+#define SPI_WHBSTATE_CLRTE     BIT(8)  /* Clear transmit error flag */
+#define SPI_WHBSTATE_SETME     BIT(7)  /* Set mode error flag */
+#define SPI_WHBSTATE_CLRME     BIT(6)  /* Clear mode error flag */
+#define SPI_WHBSTATE_SETRUE    BIT(5)  /* Set receive underflow error flag */
+#define SPI_WHBSTATE_CLRRUE    BIT(4)  /* Clear receive underflow error flag */
+#define SPI_WHBSTATE_SETMS     BIT(3)  /* Set master select bit */
+#define SPI_WHBSTATE_CLRMS     BIT(2)  /* Clear master select bit */
+#define SPI_WHBSTATE_SETEN     BIT(1)  /* Set enable bit (operational mode) */
+#define SPI_WHBSTATE_CLREN     BIT(0)  /* Clear enable bit (config mode) */
+#define SPI_WHBSTATE_CLR_ERRORS        (SPI_WHBSTATE_CLRRUE | SPI_WHBSTATE_CLRME | \
+                                SPI_WHBSTATE_CLRTE | SPI_WHBSTATE_CLRRE | \
+                                SPI_WHBSTATE_CLRAE | SPI_WHBSTATE_CLRTUE)
+
+#define SPI_RXFCON_RXFITL_S    8       /* FIFO interrupt trigger level */
+#define SPI_RXFCON_RXFITL_M    (0x3F << SPI_RXFCON_RXFITL_S)
+#define SPI_RXFCON_RXFLU       BIT(1)  /* FIFO flush */
+#define SPI_RXFCON_RXFEN       BIT(0)  /* FIFO enable */
+
+#define SPI_TXFCON_TXFITL_S    8       /* FIFO interrupt trigger level */
+#define SPI_TXFCON_TXFITL_M    (0x3F << SPI_TXFCON_TXFITL_S)
+#define SPI_TXFCON_TXFLU       BIT(1)  /* FIFO flush */
+#define SPI_TXFCON_TXFEN       BIT(0)  /* FIFO enable */
+
+#define SPI_FSTAT_RXFFL_S      0
+#define SPI_FSTAT_RXFFL_M      (0x3f << SPI_FSTAT_RXFFL_S)
+#define SPI_FSTAT_TXFFL_S      8
+#define SPI_FSTAT_TXFFL_M      (0x3f << SPI_FSTAT_TXFFL_S)
+
+#define SPI_GPOCON_ISCSBN_S    8
+#define SPI_GPOCON_INVOUTN_S   0
+
+#define SPI_FGPO_SETOUTN_S     8
+#define SPI_FGPO_CLROUTN_S     0
+
+#define SPI_RXREQ_RXCNT_M      0xFFFF  /* Receive count value */
+#define SPI_RXCNT_TODO_M       0xFFFF  /* Receive to-do value */
+
+#define SPI_IRNEN_TFI          BIT(4)  /* TX finished interrupt */
+#define SPI_IRNEN_F            BIT(3)  /* Frame end interrupt request */
+#define SPI_IRNEN_E            BIT(2)  /* Error end interrupt request */
+#define SPI_IRNEN_T_XWAY       BIT(1)  /* Transmit end interrupt request */
+#define SPI_IRNEN_R_XWAY       BIT(0)  /* Receive end interrupt request */
+#define SPI_IRNEN_R_XRX                BIT(1)  /* Receive end interrupt request */
+#define SPI_IRNEN_T_XRX                BIT(0)  /* Transmit end interrupt request */
+#define SPI_IRNEN_ALL          0x1F
+
+struct lantiq_ssc_hwcfg {
+       unsigned int irnen_r;
+       unsigned int irnen_t;
+};
+
+struct lantiq_ssc_spi {
+       struct spi_master               *master;
+       struct device                   *dev;
+       void __iomem                    *regbase;
+       struct clk                      *spi_clk;
+       struct clk                      *fpi_clk;
+       const struct lantiq_ssc_hwcfg   *hwcfg;
+
+       spinlock_t                      lock;
+       struct workqueue_struct         *wq;
+       struct work_struct              work;
+
+       const u8                        *tx;
+       u8                              *rx;
+       unsigned int                    tx_todo;
+       unsigned int                    rx_todo;
+       unsigned int                    bits_per_word;
+       unsigned int                    speed_hz;
+       unsigned int                    tx_fifo_size;
+       unsigned int                    rx_fifo_size;
+       unsigned int                    base_cs;
+};
+
+static u32 lantiq_ssc_readl(const struct lantiq_ssc_spi *spi, u32 reg)
+{
+       return __raw_readl(spi->regbase + reg);
+}
+
+static void lantiq_ssc_writel(const struct lantiq_ssc_spi *spi, u32 val,
+                             u32 reg)
+{
+       __raw_writel(val, spi->regbase + reg);
+}
+
+static void lantiq_ssc_maskl(const struct lantiq_ssc_spi *spi, u32 clr,
+                            u32 set, u32 reg)
+{
+       u32 val = __raw_readl(spi->regbase + reg);
+
+       val &= ~clr;
+       val |= set;
+       __raw_writel(val, spi->regbase + reg);
+}
+
+static unsigned int tx_fifo_level(const struct lantiq_ssc_spi *spi)
+{
+       u32 fstat = lantiq_ssc_readl(spi, SPI_FSTAT);
+
+       return (fstat & SPI_FSTAT_TXFFL_M) >> SPI_FSTAT_TXFFL_S;
+}
+
+static unsigned int rx_fifo_level(const struct lantiq_ssc_spi *spi)
+{
+       u32 fstat = lantiq_ssc_readl(spi, SPI_FSTAT);
+
+       return fstat & SPI_FSTAT_RXFFL_M;
+}
+
+static unsigned int tx_fifo_free(const struct lantiq_ssc_spi *spi)
+{
+       return spi->tx_fifo_size - tx_fifo_level(spi);
+}
+
+static void rx_fifo_reset(const struct lantiq_ssc_spi *spi)
+{
+       u32 val = spi->rx_fifo_size << SPI_RXFCON_RXFITL_S;
+
+       val |= SPI_RXFCON_RXFEN | SPI_RXFCON_RXFLU;
+       lantiq_ssc_writel(spi, val, SPI_RXFCON);
+}
+
+static void tx_fifo_reset(const struct lantiq_ssc_spi *spi)
+{
+       u32 val = 1 << SPI_TXFCON_TXFITL_S;
+
+       val |= SPI_TXFCON_TXFEN | SPI_TXFCON_TXFLU;
+       lantiq_ssc_writel(spi, val, SPI_TXFCON);
+}
+
+static void rx_fifo_flush(const struct lantiq_ssc_spi *spi)
+{
+       lantiq_ssc_maskl(spi, 0, SPI_RXFCON_RXFLU, SPI_RXFCON);
+}
+
+static void tx_fifo_flush(const struct lantiq_ssc_spi *spi)
+{
+       lantiq_ssc_maskl(spi, 0, SPI_TXFCON_TXFLU, SPI_TXFCON);
+}
+
+static void hw_enter_config_mode(const struct lantiq_ssc_spi *spi)
+{
+       lantiq_ssc_writel(spi, SPI_WHBSTATE_CLREN, SPI_WHBSTATE);
+}
+
+static void hw_enter_active_mode(const struct lantiq_ssc_spi *spi)
+{
+       lantiq_ssc_writel(spi, SPI_WHBSTATE_SETEN, SPI_WHBSTATE);
+}
+
+static void hw_setup_speed_hz(const struct lantiq_ssc_spi *spi,
+                             unsigned int max_speed_hz)
+{
+       u32 spi_clk, brt;
+
+       /*
+        * SPI module clock is derived from FPI bus clock dependent on
+        * divider value in CLC.RMS which is always set to 1.
+        *
+        *                 f_SPI
+        * baudrate = --------------
+        *             2 * (BR + 1)
+        */
+       spi_clk = clk_get_rate(spi->fpi_clk) / 2;
+
+       if (max_speed_hz > spi_clk)
+               brt = 0;
+       else
+               brt = spi_clk / max_speed_hz - 1;
+
+       if (brt > 0xFFFF)
+               brt = 0xFFFF;
+
+       dev_dbg(spi->dev, "spi_clk %u, max_speed_hz %u, brt %u\n",
+               spi_clk, max_speed_hz, brt);
+
+       lantiq_ssc_writel(spi, brt, SPI_BRT);
+}
+
+static void hw_setup_bits_per_word(const struct lantiq_ssc_spi *spi,
+                                  unsigned int bits_per_word)
+{
+       u32 bm;
+
+       /* CON.BM value = bits_per_word - 1 */
+       bm = (bits_per_word - 1) << SPI_CON_BM_S;
+
+       lantiq_ssc_maskl(spi, SPI_CON_BM_M, bm, SPI_CON);
+}
+
+static void hw_setup_clock_mode(const struct lantiq_ssc_spi *spi,
+                               unsigned int mode)
+{
+       u32 con_set = 0, con_clr = 0;
+
+       /*
+        * SPI mode mapping in CON register:
+        * Mode CPOL CPHA CON.PO CON.PH
+        *  0    0    0      0      1
+        *  1    0    1      0      0
+        *  2    1    0      1      1
+        *  3    1    1      1      0
+        */
+       if (mode & SPI_CPHA)
+               con_clr |= SPI_CON_PH;
+       else
+               con_set |= SPI_CON_PH;
+
+       if (mode & SPI_CPOL)
+               con_set |= SPI_CON_PO | SPI_CON_IDLE;
+       else
+               con_clr |= SPI_CON_PO | SPI_CON_IDLE;
+
+       /* Set heading control */
+       if (mode & SPI_LSB_FIRST)
+               con_clr |= SPI_CON_HB;
+       else
+               con_set |= SPI_CON_HB;
+
+       /* Set loopback mode */
+       if (mode & SPI_LOOP)
+               con_set |= SPI_CON_LB;
+       else
+               con_clr |= SPI_CON_LB;
+
+       lantiq_ssc_maskl(spi, con_clr, con_set, SPI_CON);
+}
+
+static void lantiq_ssc_hw_init(const struct lantiq_ssc_spi *spi)
+{
+       const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
+
+       /*
+        * Set clock divider for run mode to 1 to
+        * run at same frequency as FPI bus
+        */
+       lantiq_ssc_writel(spi, 1 << SPI_CLC_RMC_S, SPI_CLC);
+
+       /* Put controller into config mode */
+       hw_enter_config_mode(spi);
+
+       /* Clear error flags */
+       lantiq_ssc_maskl(spi, 0, SPI_WHBSTATE_CLR_ERRORS, SPI_WHBSTATE);
+
+       /* Enable error checking, disable TX/RX */
+       lantiq_ssc_writel(spi, SPI_CON_RUEN | SPI_CON_AEN | SPI_CON_TEN |
+               SPI_CON_REN | SPI_CON_TXOFF | SPI_CON_RXOFF, SPI_CON);
+
+       /* Setup default SPI mode */
+       hw_setup_bits_per_word(spi, spi->bits_per_word);
+       hw_setup_clock_mode(spi, SPI_MODE_0);
+
+       /* Enable master mode and clear error flags */
+       lantiq_ssc_writel(spi, SPI_WHBSTATE_SETMS | SPI_WHBSTATE_CLR_ERRORS,
+                              SPI_WHBSTATE);
+
+       /* Reset GPIO/CS registers */
+       lantiq_ssc_writel(spi, 0, SPI_GPOCON);
+       lantiq_ssc_writel(spi, 0xFF00, SPI_FPGO);
+
+       /* Enable and flush FIFOs */
+       rx_fifo_reset(spi);
+       tx_fifo_reset(spi);
+
+       /* Enable interrupts */
+       lantiq_ssc_writel(spi, hwcfg->irnen_t | hwcfg->irnen_r | SPI_IRNEN_E,
+                         SPI_IRNEN);
+}
+
+static int lantiq_ssc_setup(struct spi_device *spidev)
+{
+       struct spi_master *master = spidev->master;
+       struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
+       unsigned int cs = spidev->chip_select;
+       u32 gpocon;
+
+       /* GPIOs are used for CS */
+       if (gpio_is_valid(spidev->cs_gpio))
+               return 0;
+
+       dev_dbg(spi->dev, "using internal chipselect %u\n", cs);
+
+       if (cs < spi->base_cs) {
+               dev_err(spi->dev,
+                       "chipselect %i too small (min %i)\n", cs, spi->base_cs);
+               return -EINVAL;
+       }
+
+       /* set GPO pin to CS mode */
+       gpocon = 1 << ((cs - spi->base_cs) + SPI_GPOCON_ISCSBN_S);
+
+       /* invert GPO pin */
+       if (spidev->mode & SPI_CS_HIGH)
+               gpocon |= 1 << (cs - spi->base_cs);
+
+       lantiq_ssc_maskl(spi, 0, gpocon, SPI_GPOCON);
+
+       return 0;
+}
+
+static int lantiq_ssc_prepare_message(struct spi_master *master,
+                                     struct spi_message *message)
+{
+       struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
+
+       hw_enter_config_mode(spi);
+       hw_setup_clock_mode(spi, message->spi->mode);
+       hw_enter_active_mode(spi);
+
+       return 0;
+}
+
+static void hw_setup_transfer(struct lantiq_ssc_spi *spi,
+                             struct spi_device *spidev, struct spi_transfer *t)
+{
+       unsigned int speed_hz = t->speed_hz;
+       unsigned int bits_per_word = t->bits_per_word;
+       u32 con;
+
+       if (bits_per_word != spi->bits_per_word ||
+               speed_hz != spi->speed_hz) {
+               hw_enter_config_mode(spi);
+               hw_setup_speed_hz(spi, speed_hz);
+               hw_setup_bits_per_word(spi, bits_per_word);
+               hw_enter_active_mode(spi);
+
+               spi->speed_hz = speed_hz;
+               spi->bits_per_word = bits_per_word;
+       }
+
+       /* Configure transmitter and receiver */
+       con = lantiq_ssc_readl(spi, SPI_CON);
+       if (t->tx_buf)
+               con &= ~SPI_CON_TXOFF;
+       else
+               con |= SPI_CON_TXOFF;
+
+       if (t->rx_buf)
+               con &= ~SPI_CON_RXOFF;
+       else
+               con |= SPI_CON_RXOFF;
+
+       lantiq_ssc_writel(spi, con, SPI_CON);
+}
+
+static int lantiq_ssc_unprepare_message(struct spi_master *master,
+                                       struct spi_message *message)
+{
+       struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
+
+       flush_workqueue(spi->wq);
+
+       /* Disable transmitter and receiver while idle */
+       lantiq_ssc_maskl(spi, 0, SPI_CON_TXOFF | SPI_CON_RXOFF, SPI_CON);
+
+       return 0;
+}
+
+static void tx_fifo_write(struct lantiq_ssc_spi *spi)
+{
+       const u8 *tx8;
+       const u16 *tx16;
+       const u32 *tx32;
+       u32 data;
+       unsigned int tx_free = tx_fifo_free(spi);
+
+       while (spi->tx_todo && tx_free) {
+               switch (spi->bits_per_word) {
+               case 2 ... 8:
+                       tx8 = spi->tx;
+                       data = *tx8;
+                       spi->tx_todo--;
+                       spi->tx++;
+                       break;
+               case 16:
+                       tx16 = (u16 *) spi->tx;
+                       data = *tx16;
+                       spi->tx_todo -= 2;
+                       spi->tx += 2;
+                       break;
+               case 32:
+                       tx32 = (u32 *) spi->tx;
+                       data = *tx32;
+                       spi->tx_todo -= 4;
+                       spi->tx += 4;
+                       break;
+               default:
+                       WARN_ON(1);
+                       data = 0;
+                       break;
+               }
+
+               lantiq_ssc_writel(spi, data, SPI_TB);
+               tx_free--;
+       }
+}
+
+static void rx_fifo_read_full_duplex(struct lantiq_ssc_spi *spi)
+{
+       u8 *rx8;
+       u16 *rx16;
+       u32 *rx32;
+       u32 data;
+       unsigned int rx_fill = rx_fifo_level(spi);
+
+       while (rx_fill) {
+               data = lantiq_ssc_readl(spi, SPI_RB);
+
+               switch (spi->bits_per_word) {
+               case 2 ... 8:
+                       rx8 = spi->rx;
+                       *rx8 = data;
+                       spi->rx_todo--;
+                       spi->rx++;
+                       break;
+               case 16:
+                       rx16 = (u16 *) spi->rx;
+                       *rx16 = data;
+                       spi->rx_todo -= 2;
+                       spi->rx += 2;
+                       break;
+               case 32:
+                       rx32 = (u32 *) spi->rx;
+                       *rx32 = data;
+                       spi->rx_todo -= 4;
+                       spi->rx += 4;
+                       break;
+               default:
+                       WARN_ON(1);
+                       break;
+               }
+
+               rx_fill--;
+       }
+}
+
+static void rx_fifo_read_half_duplex(struct lantiq_ssc_spi *spi)
+{
+       u32 data, *rx32;
+       u8 *rx8;
+       unsigned int rxbv, shift;
+       unsigned int rx_fill = rx_fifo_level(spi);
+
+       /*
+        * In RX-only mode the bits per word value is ignored by HW. A value
+        * of 32 is used instead. Thus all 4 bytes per FIFO must be read.
+        * If remaining RX bytes are less than 4, the FIFO must be read
+        * differently. The amount of received and valid bytes is indicated
+        * by STAT.RXBV register value.
+        */
+       while (rx_fill) {
+               if (spi->rx_todo < 4)  {
+                       rxbv = (lantiq_ssc_readl(spi, SPI_STAT) &
+                               SPI_STAT_RXBV_M) >> SPI_STAT_RXBV_S;
+                       data = lantiq_ssc_readl(spi, SPI_RB);
+
+                       shift = (rxbv - 1) * 8;
+                       rx8 = spi->rx;
+
+                       while (rxbv) {
+                               *rx8++ = (data >> shift) & 0xFF;
+                               rxbv--;
+                               shift -= 8;
+                               spi->rx_todo--;
+                               spi->rx++;
+                       }
+               } else {
+                       data = lantiq_ssc_readl(spi, SPI_RB);
+                       rx32 = (u32 *) spi->rx;
+
+                       *rx32++ = data;
+                       spi->rx_todo -= 4;
+                       spi->rx += 4;
+               }
+               rx_fill--;
+       }
+}
+
+static void rx_request(struct lantiq_ssc_spi *spi)
+{
+       unsigned int rxreq, rxreq_max;
+
+       /*
+        * To avoid receive overflows at high clocks it is better to request
+        * only the amount of bytes that fits into all FIFOs. This value
+        * depends on the FIFO size implemented in hardware.
+        */
+       rxreq = spi->rx_todo;
+       rxreq_max = spi->rx_fifo_size * 4;
+       if (rxreq > rxreq_max)
+               rxreq = rxreq_max;
+
+       lantiq_ssc_writel(spi, rxreq, SPI_RXREQ);
+}
+
+static irqreturn_t lantiq_ssc_xmit_interrupt(int irq, void *data)
+{
+       struct lantiq_ssc_spi *spi = data;
+
+       if (spi->tx) {
+               if (spi->rx && spi->rx_todo)
+                       rx_fifo_read_full_duplex(spi);
+
+               if (spi->tx_todo)
+                       tx_fifo_write(spi);
+               else if (!tx_fifo_level(spi))
+                       goto completed;
+       } else if (spi->rx) {
+               if (spi->rx_todo) {
+                       rx_fifo_read_half_duplex(spi);
+
+                       if (spi->rx_todo)
+                               rx_request(spi);
+                       else
+                               goto completed;
+               } else {
+                       goto completed;
+               }
+       }
+
+       return IRQ_HANDLED;
+
+completed:
+       queue_work(spi->wq, &spi->work);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t lantiq_ssc_err_interrupt(int irq, void *data)
+{
+       struct lantiq_ssc_spi *spi = data;
+       u32 stat = lantiq_ssc_readl(spi, SPI_STAT);
+
+       if (!(stat & SPI_STAT_ERRORS))
+               return IRQ_NONE;
+
+       if (stat & SPI_STAT_RUE)
+               dev_err(spi->dev, "receive underflow error\n");
+       if (stat & SPI_STAT_TUE)
+               dev_err(spi->dev, "transmit underflow error\n");
+       if (stat & SPI_STAT_AE)
+               dev_err(spi->dev, "abort error\n");
+       if (stat & SPI_STAT_RE)
+               dev_err(spi->dev, "receive overflow error\n");
+       if (stat & SPI_STAT_TE)
+               dev_err(spi->dev, "transmit overflow error\n");
+       if (stat & SPI_STAT_ME)
+               dev_err(spi->dev, "mode error\n");
+
+       /* Clear error flags */
+       lantiq_ssc_maskl(spi, 0, SPI_WHBSTATE_CLR_ERRORS, SPI_WHBSTATE);
+
+       /* set bad status so it can be retried */
+       if (spi->master->cur_msg)
+               spi->master->cur_msg->status = -EIO;
+       queue_work(spi->wq, &spi->work);
+
+       return IRQ_HANDLED;
+}
+
+static int transfer_start(struct lantiq_ssc_spi *spi, struct spi_device *spidev,
+                         struct spi_transfer *t)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&spi->lock, flags);
+
+       spi->tx = t->tx_buf;
+       spi->rx = t->rx_buf;
+
+       if (t->tx_buf) {
+               spi->tx_todo = t->len;
+
+               /* initially fill TX FIFO */
+               tx_fifo_write(spi);
+       }
+
+       if (spi->rx) {
+               spi->rx_todo = t->len;
+
+               /* start shift clock in RX-only mode */
+               if (!spi->tx)
+                       rx_request(spi);
+       }
+
+       spin_unlock_irqrestore(&spi->lock, flags);
+
+       return t->len;
+}
+
+/*
+ * The driver only gets an interrupt when the FIFO is empty, but there
+ * is an additional shift register from which the data is written to
+ * the wire. We get the last interrupt when the controller starts to
+ * write the last word to the wire, not when it is finished. Do busy
+ * waiting till it finishes.
+ */
+static void lantiq_ssc_bussy_work(struct work_struct *work)
+{
+       struct lantiq_ssc_spi *spi;
+       unsigned long long timeout = 8LL * 1000LL;
+       unsigned long end;
+
+       spi = container_of(work, typeof(*spi), work);
+
+       /* 8 bits * 1000 ms / speed_hz: time for one word on the wire (ms) */
+       do_div(timeout, spi->speed_hz);
+       timeout += timeout + 100; /* some tolerance */
+
+       end = jiffies + msecs_to_jiffies(timeout);
+       do {
+               u32 stat = lantiq_ssc_readl(spi, SPI_STAT);
+
+               /* BSY cleared: shift register drained, transfer truly done */
+               if (!(stat & SPI_STAT_BSY)) {
+                       spi_finalize_current_transfer(spi->master);
+                       return;
+               }
+
+               cond_resched();
+       } while (!time_after_eq(jiffies, end));
+
+       /* timed out waiting for BSY to clear: fail the current message */
+       if (spi->master->cur_msg)
+               spi->master->cur_msg->status = -EIO;
+       spi_finalize_current_transfer(spi->master);
+}
+
+/*
+ * spi core ->handle_err callback, invoked on transfer error or timeout.
+ * Discards any stale data by flushing both FIFOs; @message is unused.
+ */
+static void lantiq_ssc_handle_err(struct spi_master *master,
+                                 struct spi_message *message)
+{
+       struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
+
+       /* flush FIFOs on timeout */
+       rx_fifo_flush(spi);
+       tx_fifo_flush(spi);
+}
+
+/*
+ * spi core ->set_cs callback: drive the chip-select line through the
+ * controller's forced-GPO register. Depending on whether the requested
+ * state matches the SPI_CS_HIGH polarity, the bit is written either in
+ * the low half of the register or shifted by SPI_FGPO_SETOUTN_S
+ * (presumably the set vs. clear halves -- register layout not visible
+ * here). Chip selects are offset by the DT-provided spi->base_cs.
+ */
+static void lantiq_ssc_set_cs(struct spi_device *spidev, bool enable)
+{
+       struct lantiq_ssc_spi *spi = spi_master_get_devdata(spidev->master);
+       unsigned int cs = spidev->chip_select;
+       u32 fgpo;
+
+       if (!!(spidev->mode & SPI_CS_HIGH) == enable)
+               fgpo = (1 << (cs - spi->base_cs));
+       else
+               fgpo = (1 << (cs - spi->base_cs + SPI_FGPO_SETOUTN_S));
+
+       /* NOTE(review): macro spelled SPI_FPGO here vs. SPI_FGPO_SETOUTN_S
+        * above -- confirm against the register definitions. */
+       lantiq_ssc_writel(spi, fgpo, SPI_FPGO);
+}
+
+/*
+ * spi core ->transfer_one callback: apply per-transfer hardware setup,
+ * then start the transfer. Per the SPI core contract, the positive
+ * return value indicates the transfer is in flight and will be
+ * finalized asynchronously (here via IRQ/work context, see
+ * lantiq_ssc_bussy_work()).
+ */
+static int lantiq_ssc_transfer_one(struct spi_master *master,
+                                  struct spi_device *spidev,
+                                  struct spi_transfer *t)
+{
+       struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
+
+       hw_setup_transfer(spi, spidev, t);
+
+       return transfer_start(spi, spidev, t);
+}
+
+/* Per-SoC IRQ-enable bit layouts: XWAY (ASE) vs. XRX (Falcon/xRX100). */
+static const struct lantiq_ssc_hwcfg lantiq_ssc_xway = {
+       .irnen_r = SPI_IRNEN_R_XWAY,
+       .irnen_t = SPI_IRNEN_T_XWAY,
+};
+
+static const struct lantiq_ssc_hwcfg lantiq_ssc_xrx = {
+       .irnen_r = SPI_IRNEN_R_XRX,
+       .irnen_t = SPI_IRNEN_T_XRX,
+};
+
+/* OF match table; ->data selects the hwcfg for the matched SoC family. */
+static const struct of_device_id lantiq_ssc_match[] = {
+       { .compatible = "lantiq,ase-spi", .data = &lantiq_ssc_xway, },
+       { .compatible = "lantiq,falcon-spi", .data = &lantiq_ssc_xrx, },
+       { .compatible = "lantiq,xrx100-spi", .data = &lantiq_ssc_xrx, },
+       {},
+};
+MODULE_DEVICE_TABLE(of, lantiq_ssc_match);
+
+/*
+ * Probe: map registers, request the RX/TX/error IRQs (all devm-managed),
+ * enable the gate clock, look up the FPI frequency clock, read DT
+ * properties (num-cs, base-cs), configure the spi_master callbacks and
+ * capabilities, read FIFO geometry from the ID register, initialize the
+ * hardware and register the controller. Non-devm resources (gate clock,
+ * fpi clock reference, workqueue) are unwound in reverse order on error.
+ */
+static int lantiq_ssc_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct spi_master *master;
+       struct resource *res;
+       struct lantiq_ssc_spi *spi;
+       const struct lantiq_ssc_hwcfg *hwcfg;
+       const struct of_device_id *match;
+       int err, rx_irq, tx_irq, err_irq;
+       u32 id, supports_dma, revision;
+       unsigned int num_cs;
+
+       match = of_match_device(lantiq_ssc_match, dev);
+       if (!match) {
+               dev_err(dev, "no device match\n");
+               return -EINVAL;
+       }
+       hwcfg = match->data;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(dev, "failed to get resources\n");
+               return -ENXIO;
+       }
+
+       rx_irq = platform_get_irq_byname(pdev, SPI_RX_IRQ_NAME);
+       if (rx_irq < 0) {
+               dev_err(dev, "failed to get %s\n", SPI_RX_IRQ_NAME);
+               return -ENXIO;
+       }
+
+       tx_irq = platform_get_irq_byname(pdev, SPI_TX_IRQ_NAME);
+       if (tx_irq < 0) {
+               dev_err(dev, "failed to get %s\n", SPI_TX_IRQ_NAME);
+               return -ENXIO;
+       }
+
+       err_irq = platform_get_irq_byname(pdev, SPI_ERR_IRQ_NAME);
+       if (err_irq < 0) {
+               dev_err(dev, "failed to get %s\n", SPI_ERR_IRQ_NAME);
+               return -ENXIO;
+       }
+
+       master = spi_alloc_master(dev, sizeof(struct lantiq_ssc_spi));
+       if (!master)
+               return -ENOMEM;
+
+       spi = spi_master_get_devdata(master);
+       spi->master = master;
+       spi->dev = dev;
+       spi->hwcfg = hwcfg;
+       platform_set_drvdata(pdev, spi);
+
+       spi->regbase = devm_ioremap_resource(dev, res);
+       if (IS_ERR(spi->regbase)) {
+               err = PTR_ERR(spi->regbase);
+               goto err_master_put;
+       }
+
+       /* RX and TX share one handler; the error IRQ has its own */
+       err = devm_request_irq(dev, rx_irq, lantiq_ssc_xmit_interrupt,
+                              0, SPI_RX_IRQ_NAME, spi);
+       if (err)
+               goto err_master_put;
+
+       err = devm_request_irq(dev, tx_irq, lantiq_ssc_xmit_interrupt,
+                              0, SPI_TX_IRQ_NAME, spi);
+       if (err)
+               goto err_master_put;
+
+       err = devm_request_irq(dev, err_irq, lantiq_ssc_err_interrupt,
+                              0, SPI_ERR_IRQ_NAME, spi);
+       if (err)
+               goto err_master_put;
+
+       spi->spi_clk = devm_clk_get(dev, "gate");
+       if (IS_ERR(spi->spi_clk)) {
+               err = PTR_ERR(spi->spi_clk);
+               goto err_master_put;
+       }
+       err = clk_prepare_enable(spi->spi_clk);
+       if (err)
+               goto err_master_put;
+
+       /*
+        * Use the old clk_get_fpi() function on Lantiq platform, till it
+        * supports common clk.
+        */
+#if defined(CONFIG_LANTIQ) && !defined(CONFIG_COMMON_CLK)
+       spi->fpi_clk = clk_get_fpi();
+#else
+       spi->fpi_clk = clk_get(dev, "freq");
+#endif
+       if (IS_ERR(spi->fpi_clk)) {
+               err = PTR_ERR(spi->fpi_clk);
+               goto err_clk_disable;
+       }
+
+       /* DT overrides for chip-select layout; defaults: 8 CS starting at 1 */
+       num_cs = 8;
+       of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
+
+       spi->base_cs = 1;
+       of_property_read_u32(pdev->dev.of_node, "base-cs", &spi->base_cs);
+
+       spin_lock_init(&spi->lock);
+       spi->bits_per_word = 8;
+       spi->speed_hz = 0;
+
+       master->dev.of_node = pdev->dev.of_node;
+       master->num_chipselect = num_cs;
+       master->setup = lantiq_ssc_setup;
+       master->set_cs = lantiq_ssc_set_cs;
+       master->handle_err = lantiq_ssc_handle_err;
+       master->prepare_message = lantiq_ssc_prepare_message;
+       master->unprepare_message = lantiq_ssc_unprepare_message;
+       master->transfer_one = lantiq_ssc_transfer_one;
+       master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH |
+                               SPI_LOOP;
+       master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 8) |
+                                    SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
+
+       /* ordered workqueue runs the end-of-transfer busy-wait work */
+       spi->wq = alloc_ordered_workqueue(dev_name(dev), 0);
+       if (!spi->wq) {
+               err = -ENOMEM;
+               goto err_clk_put;
+       }
+       INIT_WORK(&spi->work, lantiq_ssc_bussy_work);
+
+       /* FIFO sizes, DMA capability and revision are read from the ID reg */
+       id = lantiq_ssc_readl(spi, SPI_ID);
+       spi->tx_fifo_size = (id & SPI_ID_TXFS_M) >> SPI_ID_TXFS_S;
+       spi->rx_fifo_size = (id & SPI_ID_RXFS_M) >> SPI_ID_RXFS_S;
+       supports_dma = (id & SPI_ID_CFG_M) >> SPI_ID_CFG_S;
+       revision = id & SPI_ID_REV_M;
+
+       lantiq_ssc_hw_init(spi);
+
+       dev_info(dev,
+               "Lantiq SSC SPI controller (Rev %i, TXFS %u, RXFS %u, DMA %u)\n",
+               revision, spi->tx_fifo_size, spi->rx_fifo_size, supports_dma);
+
+       err = devm_spi_register_master(dev, master);
+       if (err) {
+               dev_err(dev, "failed to register spi_master\n");
+               goto err_wq_destroy;
+       }
+
+       return 0;
+
+err_wq_destroy:
+       destroy_workqueue(spi->wq);
+err_clk_put:
+       clk_put(spi->fpi_clk);
+err_clk_disable:
+       clk_disable_unprepare(spi->spi_clk);
+err_master_put:
+       spi_master_put(master);
+
+       return err;
+}
+
+/*
+ * Remove: mask all interrupts, clear the clock control register, flush
+ * both FIFOs and put the controller back into config mode, then release
+ * the workqueue and clocks. The spi_master, IRQs and register mapping
+ * are devm-managed and released by the core.
+ */
+static int lantiq_ssc_remove(struct platform_device *pdev)
+{
+       struct lantiq_ssc_spi *spi = platform_get_drvdata(pdev);
+
+       lantiq_ssc_writel(spi, 0, SPI_IRNEN);
+       lantiq_ssc_writel(spi, 0, SPI_CLC);
+       rx_fifo_flush(spi);
+       tx_fifo_flush(spi);
+       hw_enter_config_mode(spi);
+
+       destroy_workqueue(spi->wq);
+       clk_disable_unprepare(spi->spi_clk);
+       clk_put(spi->fpi_clk);
+
+       return 0;
+}
+
+static struct platform_driver lantiq_ssc_driver = {
+       .probe = lantiq_ssc_probe,
+       .remove = lantiq_ssc_remove,
+       .driver = {
+               .name = "spi-lantiq-ssc",
+               /* NOTE(review): platform_driver_register() already sets
+                * .owner = THIS_MODULE; this assignment is redundant. */
+               .owner = THIS_MODULE,
+               .of_match_table = lantiq_ssc_match,
+       },
+};
+module_platform_driver(lantiq_ssc_driver);
+
+MODULE_DESCRIPTION("Lantiq SSC SPI controller driver");
+MODULE_AUTHOR("Daniel Schwierzeck <daniel.schwierzeck@gmail.com>");
+MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:spi-lantiq-ssc");
index c36002110c30a58e158043d5820fc671fb9b5c64..e8b59ce4dc3a74153cb15ef9f128b44070a573da 100644 (file)
@@ -437,8 +437,9 @@ static int mpc52xx_spi_probe(struct platform_device *op)
        ms->gpio_cs_count = of_gpio_count(op->dev.of_node);
        if (ms->gpio_cs_count > 0) {
                master->num_chipselect = ms->gpio_cs_count;
-               ms->gpio_cs = kmalloc(ms->gpio_cs_count * sizeof(unsigned int),
-                               GFP_KERNEL);
+               ms->gpio_cs = kmalloc_array(ms->gpio_cs_count,
+                                           sizeof(*ms->gpio_cs),
+                                           GFP_KERNEL);
                if (!ms->gpio_cs) {
                        rc = -ENOMEM;
                        goto err_alloc_gpio;
@@ -448,8 +449,7 @@ static int mpc52xx_spi_probe(struct platform_device *op)
                        gpio_cs = of_get_gpio(op->dev.of_node, i);
                        if (gpio_cs < 0) {
                                dev_err(&op->dev,
-                                       "could not parse the gpio field "
-                                       "in oftree\n");
+                                       "could not parse the gpio field in oftree\n");
                                rc = -ENODEV;
                                goto err_gpio;
                        }
@@ -457,8 +457,8 @@ static int mpc52xx_spi_probe(struct platform_device *op)
                        rc = gpio_request(gpio_cs, dev_name(&op->dev));
                        if (rc) {
                                dev_err(&op->dev,
-                                       "can't request spi cs gpio #%d "
-                                       "on gpio line %d\n", i, gpio_cs);
+                                       "can't request spi cs gpio #%d on gpio line %d\n",
+                                       i, gpio_cs);
                                goto err_gpio;
                        }
 
index 899d7a8f0889eaf39388ad458b28a1abbeeba671..278867a319506ff830c634cc24b2a2d819021a6b 100644 (file)
@@ -73,7 +73,7 @@
 #define MTK_SPI_IDLE 0
 #define MTK_SPI_PAUSED 1
 
-#define MTK_SPI_MAX_FIFO_SIZE 32
+#define MTK_SPI_MAX_FIFO_SIZE 32U
 #define MTK_SPI_PACKET_SIZE 1024
 
 struct mtk_spi_compatible {
@@ -333,7 +333,7 @@ static int mtk_spi_fifo_transfer(struct spi_master *master,
        struct mtk_spi *mdata = spi_master_get_devdata(master);
 
        mdata->cur_transfer = xfer;
-       mdata->xfer_len = xfer->len;
+       mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len);
        mtk_spi_prepare_transfer(master, xfer);
        mtk_spi_setup_packet(master);
 
@@ -410,7 +410,10 @@ static bool mtk_spi_can_dma(struct spi_master *master,
                            struct spi_device *spi,
                            struct spi_transfer *xfer)
 {
-       return xfer->len > MTK_SPI_MAX_FIFO_SIZE;
+       /* Buffers for DMA transactions must be 4-byte aligned */
+       return (xfer->len > MTK_SPI_MAX_FIFO_SIZE &&
+               (unsigned long)xfer->tx_buf % 4 == 0 &&
+               (unsigned long)xfer->rx_buf % 4 == 0);
 }
 
 static int mtk_spi_setup(struct spi_device *spi)
@@ -451,7 +454,33 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
                                        &reg_val, remainder);
                        }
                }
-               spi_finalize_current_transfer(master);
+
+               trans->len -= mdata->xfer_len;
+               if (!trans->len) {
+                       spi_finalize_current_transfer(master);
+                       return IRQ_HANDLED;
+               }
+
+               if (trans->tx_buf)
+                       trans->tx_buf += mdata->xfer_len;
+               if (trans->rx_buf)
+                       trans->rx_buf += mdata->xfer_len;
+
+               mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, trans->len);
+               mtk_spi_setup_packet(master);
+
+               cnt = trans->len / 4;
+               iowrite32_rep(mdata->base + SPI_TX_DATA_REG, trans->tx_buf, cnt);
+
+               remainder = trans->len % 4;
+               if (remainder > 0) {
+                       reg_val = 0;
+                       memcpy(&reg_val, trans->tx_buf + (cnt * 4), remainder);
+                       writel(reg_val, mdata->base + SPI_TX_DATA_REG);
+               }
+
+               mtk_spi_enable_transfer(master);
+
                return IRQ_HANDLED;
        }
 
index dd3d0a218d8b1ffc2299595fc55adadaaa3bdeeb..967d94844b30d162b353da096e1d2f865f7128e8 100644 (file)
@@ -411,7 +411,7 @@ static int spi_ppc4xx_of_probe(struct platform_device *op)
        if (num_gpios > 0) {
                int i;
 
-               hw->gpios = kzalloc(sizeof(int) * num_gpios, GFP_KERNEL);
+               hw->gpios = kcalloc(num_gpios, sizeof(*hw->gpios), GFP_KERNEL);
                if (!hw->gpios) {
                        ret = -ENOMEM;
                        goto free_master;
@@ -428,8 +428,9 @@ static int spi_ppc4xx_of_probe(struct platform_device *op)
                                /* Real CS - set the initial state. */
                                ret = gpio_request(gpio, np->name);
                                if (ret < 0) {
-                                       dev_err(dev, "can't request gpio "
-                                                       "#%d: %d\n", i, ret);
+                                       dev_err(dev,
+                                               "can't request gpio #%d: %d\n",
+                                               i, ret);
                                        goto free_gpios;
                                }
 
index 58d2d48e16a530869528288d50f3a6581c3ea311..869f188b02eb3b92e994400c886ba631c95f2805 100644 (file)
@@ -41,6 +41,13 @@ struct pxa_spi_info {
 static struct dw_dma_slave byt_tx_param = { .dst_id = 0 };
 static struct dw_dma_slave byt_rx_param = { .src_id = 1 };
 
+static struct dw_dma_slave mrfld3_tx_param = { .dst_id = 15 };
+static struct dw_dma_slave mrfld3_rx_param = { .src_id = 14 };
+static struct dw_dma_slave mrfld5_tx_param = { .dst_id = 13 };
+static struct dw_dma_slave mrfld5_rx_param = { .src_id = 12 };
+static struct dw_dma_slave mrfld6_tx_param = { .dst_id = 11 };
+static struct dw_dma_slave mrfld6_rx_param = { .src_id = 10 };
+
 static struct dw_dma_slave bsw0_tx_param = { .dst_id = 0 };
 static struct dw_dma_slave bsw0_rx_param = { .src_id = 1 };
 static struct dw_dma_slave bsw1_tx_param = { .dst_id = 6 };
@@ -93,22 +100,39 @@ static int lpss_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c)
 
 static int mrfld_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c)
 {
+       struct pci_dev *dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(21, 0));
+       struct dw_dma_slave *tx, *rx;
+
        switch (PCI_FUNC(dev->devfn)) {
        case 0:
                c->port_id = 3;
                c->num_chipselect = 1;
+               c->tx_param = &mrfld3_tx_param;
+               c->rx_param = &mrfld3_rx_param;
                break;
        case 1:
                c->port_id = 5;
                c->num_chipselect = 4;
+               c->tx_param = &mrfld5_tx_param;
+               c->rx_param = &mrfld5_rx_param;
                break;
        case 2:
                c->port_id = 6;
                c->num_chipselect = 1;
+               c->tx_param = &mrfld6_tx_param;
+               c->rx_param = &mrfld6_rx_param;
                break;
        default:
                return -ENODEV;
        }
+
+       tx = c->tx_param;
+       tx->dma_dev = &dma_dev->dev;
+
+       rx = c->rx_param;
+       rx->dma_dev = &dma_dev->dev;
+
+       c->dma_filter = lpss_dma_filter;
        return 0;
 }
 
@@ -203,10 +227,16 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
        ssp = &spi_pdata.ssp;
        ssp->phys_base = pci_resource_start(dev, 0);
        ssp->mmio_base = pcim_iomap_table(dev)[0];
-       ssp->irq = dev->irq;
        ssp->port_id = (c->port_id >= 0) ? c->port_id : dev->devfn;
        ssp->type = c->type;
 
+       pci_set_master(dev);
+
+       ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_ALL_TYPES);
+       if (ret < 0)
+               return ret;
+       ssp->irq = pci_irq_vector(dev, 0);
+
        snprintf(buf, sizeof(buf), "pxa2xx-spi.%d", ssp->port_id);
        ssp->clk = clk_register_fixed_rate(&dev->dev, buf , NULL, 0,
                                           c->max_clk_rate);
index d6239fa718be9e251f577b9d9dd792a0e5c5ead5..47b65d7c40721eaf39fc0ba0e9dfa016a5407cfa 100644 (file)
@@ -732,6 +732,20 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
        return IRQ_HANDLED;
 }
 
+static void handle_bad_msg(struct driver_data *drv_data)
+{
+       pxa2xx_spi_write(drv_data, SSCR0,
+                        pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+       pxa2xx_spi_write(drv_data, SSCR1,
+                        pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1);
+       if (!pxa25x_ssp_comp(drv_data))
+               pxa2xx_spi_write(drv_data, SSTO, 0);
+       write_SSSR_CS(drv_data, drv_data->clear_sr);
+
+       dev_err(&drv_data->pdev->dev,
+               "bad message state in interrupt handler\n");
+}
+
 static irqreturn_t ssp_int(int irq, void *dev_id)
 {
        struct driver_data *drv_data = dev_id;
@@ -771,21 +785,11 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
        if (!(status & mask))
                return IRQ_NONE;
 
-       if (!drv_data->master->cur_msg) {
-
-               pxa2xx_spi_write(drv_data, SSCR0,
-                                pxa2xx_spi_read(drv_data, SSCR0)
-                                & ~SSCR0_SSE);
-               pxa2xx_spi_write(drv_data, SSCR1,
-                                pxa2xx_spi_read(drv_data, SSCR1)
-                                & ~drv_data->int_cr1);
-               if (!pxa25x_ssp_comp(drv_data))
-                       pxa2xx_spi_write(drv_data, SSTO, 0);
-               write_SSSR_CS(drv_data, drv_data->clear_sr);
-
-               dev_err(&drv_data->pdev->dev,
-                       "bad message state in interrupt handler\n");
+       pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg & ~drv_data->int_cr1);
+       pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
 
+       if (!drv_data->master->cur_msg) {
+               handle_bad_msg(drv_data);
                /* Never fail */
                return IRQ_HANDLED;
        }
@@ -1458,6 +1462,10 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
        { PCI_VDEVICE(INTEL, 0x1ac2), LPSS_BXT_SSP },
        { PCI_VDEVICE(INTEL, 0x1ac4), LPSS_BXT_SSP },
        { PCI_VDEVICE(INTEL, 0x1ac6), LPSS_BXT_SSP },
+       /* GLK */
+       { PCI_VDEVICE(INTEL, 0x31c2), LPSS_BXT_SSP },
+       { PCI_VDEVICE(INTEL, 0x31c4), LPSS_BXT_SSP },
+       { PCI_VDEVICE(INTEL, 0x31c6), LPSS_BXT_SSP },
        /* APL */
        { PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
        { PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
index 0f89c2169c244e433a36e7cc7e672d385d3c6560..acf31f36b89856bcf0a3b9c7d0ae113d6e5589cb 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/dmaengine.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
 #include <linux/spi/spi.h>
 #include <linux/pm_runtime.h>
@@ -843,6 +844,8 @@ static int rockchip_spi_suspend(struct device *dev)
                clk_disable_unprepare(rs->apb_pclk);
        }
 
+       pinctrl_pm_select_sleep_state(dev);
+
        return ret;
 }
 
@@ -852,6 +855,8 @@ static int rockchip_spi_resume(struct device *dev)
        struct spi_master *master = dev_get_drvdata(dev);
        struct rockchip_spi *rs = spi_master_get_devdata(master);
 
+       pinctrl_pm_select_default_state(dev);
+
        if (!pm_runtime_suspended(dev)) {
                ret = clk_prepare_enable(rs->apb_pclk);
                if (ret < 0)
index 9daf500317376bd143cb56eb4b14738d11adbdca..2a10b3f94ff72a4ea3d6924a082ef6d2db174528 100644 (file)
@@ -808,7 +808,7 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
                        for (i = 0; i < len; i++)
                                rspi_write_data(rspi, *tx++);
                } else {
-                       ret = rspi_pio_transfer(rspi, tx, NULL, n);
+                       ret = rspi_pio_transfer(rspi, tx, NULL, len);
                        if (ret < 0)
                                return ret;
                }
@@ -845,10 +845,9 @@ static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
                        for (i = 0; i < len; i++)
                                *rx++ = rspi_read_data(rspi);
                } else {
-                       ret = rspi_pio_transfer(rspi, NULL, rx, n);
+                       ret = rspi_pio_transfer(rspi, NULL, rx, len);
                        if (ret < 0)
                                return ret;
-                       *rx++ = ret;
                }
                n -= len;
        }
@@ -1227,10 +1226,8 @@ static int rspi_probe(struct platform_device *pdev)
        const struct spi_ops *ops;
 
        master = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
-       if (master == NULL) {
-               dev_err(&pdev->dev, "spi_alloc_master error.\n");
+       if (master == NULL)
                return -ENOMEM;
-       }
 
        of_id = of_match_device(rspi_of_match, &pdev->dev);
        if (of_id) {
index 28dfdce4beae4d5fe3d983eb11a78a4c9a70a840..b392cca8fa4f5ba3c1c499ea8b1bc228fcabdbee 100644 (file)
@@ -341,43 +341,16 @@ static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
 static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
 {
        struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
-       struct device *dev = &sdd->pdev->dev;
 
        if (is_polling(sdd))
                return 0;
 
-       /* Acquire DMA channels */
-       sdd->rx_dma.ch = dma_request_slave_channel(dev, "rx");
-       if (!sdd->rx_dma.ch) {
-               dev_err(dev, "Failed to get RX DMA channel\n");
-               return -EBUSY;
-       }
        spi->dma_rx = sdd->rx_dma.ch;
-
-       sdd->tx_dma.ch = dma_request_slave_channel(dev, "tx");
-       if (!sdd->tx_dma.ch) {
-               dev_err(dev, "Failed to get TX DMA channel\n");
-               dma_release_channel(sdd->rx_dma.ch);
-               return -EBUSY;
-       }
        spi->dma_tx = sdd->tx_dma.ch;
 
        return 0;
 }
 
-static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
-{
-       struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
-
-       /* Free DMA channels */
-       if (!is_polling(sdd)) {
-               dma_release_channel(sdd->rx_dma.ch);
-               dma_release_channel(sdd->tx_dma.ch);
-       }
-
-       return 0;
-}
-
 static bool s3c64xx_spi_can_dma(struct spi_master *master,
                                struct spi_device *spi,
                                struct spi_transfer *xfer)
@@ -996,7 +969,7 @@ static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
                sci->num_cs = temp;
        }
 
-       sci->no_cs = of_property_read_bool(dev->of_node, "broken-cs");
+       sci->no_cs = of_property_read_bool(dev->of_node, "no-cs-readback");
 
        return sci;
 }
@@ -1094,7 +1067,6 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
        master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
        master->prepare_message = s3c64xx_spi_prepare_message;
        master->transfer_one = s3c64xx_spi_transfer_one;
-       master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
        master->num_chipselect = sci->num_cs;
        master->dma_alignment = 8;
        master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
@@ -1161,6 +1133,24 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
                }
        }
 
+       if (!is_polling(sdd)) {
+               /* Acquire DMA channels */
+               sdd->rx_dma.ch = dma_request_slave_channel_reason(&pdev->dev,
+                                                                 "rx");
+               if (IS_ERR(sdd->rx_dma.ch)) {
+                       dev_err(&pdev->dev, "Failed to get RX DMA channel\n");
+                       ret = PTR_ERR(sdd->rx_dma.ch);
+                       goto err_disable_io_clk;
+               }
+               sdd->tx_dma.ch = dma_request_slave_channel_reason(&pdev->dev,
+                                                                 "tx");
+               if (IS_ERR(sdd->tx_dma.ch)) {
+                       dev_err(&pdev->dev, "Failed to get TX DMA channel\n");
+                       ret = PTR_ERR(sdd->tx_dma.ch);
+                       goto err_release_rx_dma;
+               }
+       }
+
        pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_set_active(&pdev->dev);
@@ -1206,6 +1196,12 @@ err_pm_put:
        pm_runtime_disable(&pdev->dev);
        pm_runtime_set_suspended(&pdev->dev);
 
+       if (!is_polling(sdd))
+               dma_release_channel(sdd->tx_dma.ch);
+err_release_rx_dma:
+       if (!is_polling(sdd))
+               dma_release_channel(sdd->rx_dma.ch);
+err_disable_io_clk:
        clk_disable_unprepare(sdd->ioclk);
 err_disable_src_clk:
        clk_disable_unprepare(sdd->src_clk);
@@ -1226,6 +1222,11 @@ static int s3c64xx_spi_remove(struct platform_device *pdev)
 
        writel(0, sdd->regs + S3C64XX_SPI_INT_EN);
 
+       if (!is_polling(sdd)) {
+               dma_release_channel(sdd->rx_dma.ch);
+               dma_release_channel(sdd->tx_dma.ch);
+       }
+
        clk_disable_unprepare(sdd->ioclk);
 
        clk_disable_unprepare(sdd->src_clk);
index 1f00eeb0b5a3fb93ae838978dbf7815d94897378..2ce15ca977828668e0fa8e186b6b9a8393cdc70d 100644 (file)
@@ -1164,10 +1164,8 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
        int ret;
 
        master = spi_alloc_master(&pdev->dev, sizeof(struct sh_msiof_spi_priv));
-       if (master == NULL) {
-               dev_err(&pdev->dev, "failed to allocate spi master\n");
+       if (master == NULL)
                return -ENOMEM;
-       }
 
        p = spi_master_get_devdata(master);
 
index ec6fb09e2e1711f7adfbe079559a89f1a9cd343d..ad76a44fee6f4e9df0e2bb6e46354599e907192a 100644 (file)
@@ -652,7 +652,8 @@ static int ti_qspi_probe(struct platform_device *pdev)
                r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
                if (r == NULL) {
                        dev_err(&pdev->dev, "missing platform data\n");
-                       return -ENODEV;
+                       ret = -ENODEV;
+                       goto free_master;
                }
        }
 
@@ -669,7 +670,8 @@ static int ti_qspi_probe(struct platform_device *pdev)
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "no irq resource?\n");
-               return irq;
+               ret = irq;
+               goto free_master;
        }
 
        mutex_init(&qspi->list_lock);
@@ -685,15 +687,17 @@ static int ti_qspi_probe(struct platform_device *pdev)
                qspi->ctrl_base =
                syscon_regmap_lookup_by_phandle(np,
                                                "syscon-chipselects");
-               if (IS_ERR(qspi->ctrl_base))
-                       return PTR_ERR(qspi->ctrl_base);
+               if (IS_ERR(qspi->ctrl_base)) {
+                       ret = PTR_ERR(qspi->ctrl_base);
+                       goto free_master;
+               }
                ret = of_property_read_u32_index(np,
                                                 "syscon-chipselects",
                                                 1, &qspi->ctrl_reg);
                if (ret) {
                        dev_err(&pdev->dev,
                                "couldn't get ctrl_mod reg index\n");
-                       return ret;
+                       goto free_master;
                }
        }
 
@@ -714,9 +718,10 @@ static int ti_qspi_probe(struct platform_device *pdev)
        dma_cap_set(DMA_MEMCPY, mask);
 
        qspi->rx_chan = dma_request_chan_by_mask(&mask);
-       if (!qspi->rx_chan) {
+       if (IS_ERR(qspi->rx_chan)) {
                dev_err(qspi->dev,
                        "No Rx DMA available, trying mmap mode\n");
+               qspi->rx_chan = NULL;
                ret = 0;
                goto no_dma;
        }
@@ -742,6 +747,7 @@ no_dma:
        if (!ret)
                return 0;
 
+       pm_runtime_disable(&pdev->dev);
 free_master:
        spi_master_put(master);
        return ret;
index fcb991034c3d076bc032545addca93517017b126..97d137591b18d5fe12359e5a8865fd00b93a9118 100644 (file)
@@ -591,7 +591,6 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
 
        if (!data->pkt_rx_buff) {
                /* flush queue and set status of all transfers to -ENOMEM */
-               dev_err(&data->master->dev, "%s :kzalloc failed\n", __func__);
                list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
                        pmsg->status = -ENOMEM;
 
@@ -622,8 +621,9 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
        if (n_writes > PCH_MAX_FIFO_DEPTH)
                n_writes = PCH_MAX_FIFO_DEPTH;
 
-       dev_dbg(&data->master->dev, "\n%s:Pulling down SSN low - writing "
-               "0x2 to SSNXCR\n", __func__);
+       dev_dbg(&data->master->dev,
+               "\n%s:Pulling down SSN low - writing 0x2 to SSNXCR\n",
+               __func__);
        pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
 
        for (j = 0; j < n_writes; j++)
@@ -915,7 +915,6 @@ static void pch_spi_release_dma(struct pch_spi_data *data)
                dma_release_channel(dma->chan_rx);
                dma->chan_rx = NULL;
        }
-       return;
 }
 
 static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
@@ -1008,7 +1007,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
        spin_unlock_irqrestore(&data->lock, flags);
 
        /* RX */
-       dma->sg_rx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);
+       dma->sg_rx_p = kcalloc(num, sizeof(*dma->sg_rx_p), GFP_ATOMIC);
        sg_init_table(dma->sg_rx_p, num); /* Initialize SG table */
        /* offset, length setting */
        sg = dma->sg_rx_p;
@@ -1068,7 +1067,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
                head = 0;
        }
 
-       dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);
+       dma->sg_tx_p = kcalloc(num, sizeof(*dma->sg_tx_p), GFP_ATOMIC);
        sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */
        /* offset, length setting */
        sg = dma->sg_tx_p;
@@ -1181,14 +1180,16 @@ static void pch_spi_process_messages(struct work_struct *pwork)
                        data->cur_trans =
                                list_entry(data->current_msg->transfers.next,
                                           struct spi_transfer, transfer_list);
-                       dev_dbg(&data->master->dev, "%s "
-                               ":Getting 1st transfer message\n", __func__);
+                       dev_dbg(&data->master->dev,
+                               "%s :Getting 1st transfer message\n",
+                               __func__);
                } else {
                        data->cur_trans =
                                list_entry(data->cur_trans->transfer_list.next,
                                           struct spi_transfer, transfer_list);
-                       dev_dbg(&data->master->dev, "%s "
-                               ":Getting next transfer message\n", __func__);
+                       dev_dbg(&data->master->dev,
+                               "%s :Getting next transfer message\n",
+                               __func__);
                }
                spin_unlock(&data->lock);
 
@@ -1233,9 +1234,8 @@ static void pch_spi_process_messages(struct work_struct *pwork)
 
                /* check for delay */
                if (data->cur_trans->delay_usecs) {
-                       dev_dbg(&data->master->dev, "%s:"
-                               "delay in usec=%d\n", __func__,
-                               data->cur_trans->delay_usecs);
+                       dev_dbg(&data->master->dev, "%s:delay in usec=%d\n",
+                               __func__, data->cur_trans->delay_usecs);
                        udelay(data->cur_trans->delay_usecs);
                }
 
@@ -1292,7 +1292,6 @@ static void pch_free_dma_buf(struct pch_spi_board_data *board_dat,
        if (dma->rx_buf_dma)
                dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
                                  dma->rx_buf_virt, dma->rx_buf_dma);
-       return;
 }
 
 static void pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
@@ -1541,11 +1540,11 @@ static int pch_spi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        int i;
        struct pch_pd_dev_save *pd_dev_save;
 
-       pd_dev_save = kzalloc(sizeof(struct pch_pd_dev_save), GFP_KERNEL);
+       pd_dev_save = kzalloc(sizeof(*pd_dev_save), GFP_KERNEL);
        if (!pd_dev_save)
                return -ENOMEM;
 
-       board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL);
+       board_dat = kzalloc(sizeof(*board_dat), GFP_KERNEL);
        if (!board_dat) {
                retval = -ENOMEM;
                goto err_no_mem;
index 656dd3e3220c5062b43b5a162a078f245f90630c..f274df9e0e3e2265c723139bb2b2f92ad72e32ba 100644 (file)
@@ -621,8 +621,10 @@ void spi_unregister_device(struct spi_device *spi)
        if (!spi)
                return;
 
-       if (spi->dev.of_node)
+       if (spi->dev.of_node) {
                of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
+               of_node_put(spi->dev.of_node);
+       }
        if (ACPI_COMPANION(&spi->dev))
                acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
        device_unregister(&spi->dev);
@@ -672,7 +674,7 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n)
        if (!n)
                return -EINVAL;
 
-       bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
+       bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
        if (!bi)
                return -ENOMEM;
 
@@ -805,12 +807,12 @@ static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
        if (master->dma_tx)
                tx_dev = master->dma_tx->device->dev;
        else
-               tx_dev = &master->dev;
+               tx_dev = master->dev.parent;
 
        if (master->dma_rx)
                rx_dev = master->dma_rx->device->dev;
        else
-               rx_dev = &master->dev;
+               rx_dev = master->dev.parent;
 
        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                if (!master->can_dma(master, msg->spi, xfer))
@@ -852,12 +854,12 @@ static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
        if (master->dma_tx)
                tx_dev = master->dma_tx->device->dev;
        else
-               tx_dev = &master->dev;
+               tx_dev = master->dev.parent;
 
        if (master->dma_rx)
                rx_dev = master->dma_rx->device->dev;
        else
-               rx_dev = &master->dev;
+               rx_dev = master->dev.parent;
 
        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                if (!master->can_dma(master, msg->spi, xfer))
@@ -1502,37 +1504,18 @@ err_init_queue:
 /*-------------------------------------------------------------------------*/
 
 #if defined(CONFIG_OF)
-static struct spi_device *
-of_register_spi_device(struct spi_master *master, struct device_node *nc)
+static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi,
+                          struct device_node *nc)
 {
-       struct spi_device *spi;
-       int rc;
        u32 value;
-
-       /* Alloc an spi_device */
-       spi = spi_alloc_device(master);
-       if (!spi) {
-               dev_err(&master->dev, "spi_device alloc error for %s\n",
-                       nc->full_name);
-               rc = -ENOMEM;
-               goto err_out;
-       }
-
-       /* Select device driver */
-       rc = of_modalias_node(nc, spi->modalias,
-                               sizeof(spi->modalias));
-       if (rc < 0) {
-               dev_err(&master->dev, "cannot find modalias for %s\n",
-                       nc->full_name);
-               goto err_out;
-       }
+       int rc;
 
        /* Device address */
        rc = of_property_read_u32(nc, "reg", &value);
        if (rc) {
                dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
                        nc->full_name, rc);
-               goto err_out;
+               return rc;
        }
        spi->chip_select = value;
 
@@ -1590,10 +1573,41 @@ of_register_spi_device(struct spi_master *master, struct device_node *nc)
        if (rc) {
                dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
                        nc->full_name, rc);
-               goto err_out;
+               return rc;
        }
        spi->max_speed_hz = value;
 
+       return 0;
+}
+
+static struct spi_device *
+of_register_spi_device(struct spi_master *master, struct device_node *nc)
+{
+       struct spi_device *spi;
+       int rc;
+
+       /* Alloc an spi_device */
+       spi = spi_alloc_device(master);
+       if (!spi) {
+               dev_err(&master->dev, "spi_device alloc error for %s\n",
+                       nc->full_name);
+               rc = -ENOMEM;
+               goto err_out;
+       }
+
+       /* Select device driver */
+       rc = of_modalias_node(nc, spi->modalias,
+                               sizeof(spi->modalias));
+       if (rc < 0) {
+               dev_err(&master->dev, "cannot find modalias for %s\n",
+                       nc->full_name);
+               goto err_out;
+       }
+
+       rc = of_spi_parse_dt(master, spi, nc);
+       if (rc)
+               goto err_out;
+
        /* Store a pointer to the node in the device structure */
        of_node_get(nc);
        spi->dev.of_node = nc;
@@ -1603,11 +1617,13 @@ of_register_spi_device(struct spi_master *master, struct device_node *nc)
        if (rc) {
                dev_err(&master->dev, "spi_device register error %s\n",
                        nc->full_name);
-               goto err_out;
+               goto err_of_node_put;
        }
 
        return spi;
 
+err_of_node_put:
+       of_node_put(nc);
 err_out:
        spi_dev_put(spi);
        return ERR_PTR(rc);
index b653451843c84e81cdbea96fc0c8c006832e24c1..937c2d5d7ec3f922d17fa38ceaf9f67f43a0a87b 100644 (file)
@@ -1300,7 +1300,7 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
                        seq_printf(s, "%16s %16u %16zu %d %d\n",
                                   buffer->task_comm, buffer->pid,
                                   buffer->size, buffer->kmap_cnt,
-                                  atomic_read(&buffer->ref.refcount));
+                                  kref_read(&buffer->ref));
                        total_orphaned_size += buffer->size;
                }
        }
index c7d7682b1412ba7b7cc8730a363b84aeed493adc..1e1df89b50181e97a6df0fbed0d6ba54c01439a2 100644 (file)
@@ -188,7 +188,7 @@ bool comedi_buf_is_mmapped(struct comedi_subdevice *s)
 {
        struct comedi_buf_map *bm = s->async->buf_map;
 
-       return bm && (atomic_read(&bm->refcount.refcount) > 1);
+       return bm && (kref_read(&bm->refcount) > 1);
 }
 
 int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
index 113f3d6c4b3a6cdeda3fce3abe729fbc927fd9d9..27f75b17679b8f19a5f6100a769dd7b1b0f77455 100644 (file)
@@ -45,12 +45,18 @@ u32 gb_timesync_platform_get_clock_rate(void)
 
 int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata)
 {
+       if (!arche_platform_change_state_cb)
+               return 0;
+
        return arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_TIME_SYNC,
                                              pdata);
 }
 
 void gb_timesync_platform_unlock_bus(void)
 {
+       if (!arche_platform_change_state_cb)
+               return;
+
        arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_ACTIVE, NULL);
 }
 
index 39a72e3f0c18fca4bacdc2aa15c7d1d586b51cb6..7035356e56b33ab38aef0641af41e6d2f18901e1 100644 (file)
@@ -107,7 +107,7 @@ void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msgdata)
                libcfs_debug_dumplog();
        if (libcfs_panic_on_lbug)
                panic("LBUG");
-       set_task_state(current, TASK_UNINTERRUPTIBLE);
+       set_current_state(TASK_UNINTERRUPTIBLE);
        while (1)
                schedule();
 }
index ee01f20d8b1110bec42a3aebf82ef69b79e63d80..9afa6bec3e6f44e22f1bf4fbcbd76e4476348fb3 100644 (file)
@@ -390,15 +390,13 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
                result = VM_FAULT_LOCKED;
                break;
        case -ENODATA:
+       case -EAGAIN:
        case -EFAULT:
                result = VM_FAULT_NOPAGE;
                break;
        case -ENOMEM:
                result = VM_FAULT_OOM;
                break;
-       case -EAGAIN:
-               result = VM_FAULT_RETRY;
-               break;
        default:
                result = VM_FAULT_SIGBUS;
                break;
index 1ebd13ef7bd333c5cbc488f7543eda58e29a2123..26929c44d70316d19a5691a400c0f87b4f63ff9d 100644 (file)
@@ -352,7 +352,15 @@ int core_enable_device_list_for_node(
                        kfree(new);
                        return -EINVAL;
                }
-               BUG_ON(orig->se_lun_acl != NULL);
+               if (orig->se_lun_acl != NULL) {
+                       pr_warn_ratelimited("Detected existing explicit"
+                               " se_lun_acl->se_lun_group reference for %s"
+                               " mapped_lun: %llu, failing\n",
+                                nacl->initiatorname, mapped_lun);
+                       mutex_unlock(&nacl->lun_entry_mutex);
+                       kfree(new);
+                       return -EINVAL;
+               }
 
                rcu_assign_pointer(new->se_lun, lun);
                rcu_assign_pointer(new->se_lun_acl, lun_acl);
index d761025144f9dc178cc43d4803b4c79b0147815b..e18051185846ad2824ed7aef82ea66ca2898d83c 100644 (file)
@@ -788,7 +788,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
                         * __core_scsi3_add_registration()
                         */
                        dest_lun = rcu_dereference_check(deve_tmp->se_lun,
-                               atomic_read(&deve_tmp->pr_kref.refcount) != 0);
+                               kref_read(&deve_tmp->pr_kref) != 0);
 
                        pr_reg_atp = __core_scsi3_do_alloc_registration(dev,
                                                nacl_tmp, dest_lun, deve_tmp,
@@ -1463,7 +1463,7 @@ static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
         * For nacl->dynamic_node_acl=1
         */
        lun_acl = rcu_dereference_check(se_deve->se_lun_acl,
-                               atomic_read(&se_deve->pr_kref.refcount) != 0);
+                               kref_read(&se_deve->pr_kref) != 0);
        if (!lun_acl)
                return 0;
 
@@ -1478,7 +1478,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
         * For nacl->dynamic_node_acl=1
         */
        lun_acl = rcu_dereference_check(se_deve->se_lun_acl,
-                               atomic_read(&se_deve->pr_kref.refcount) != 0);
+                               kref_read(&se_deve->pr_kref) != 0);
        if (!lun_acl) {
                kref_put(&se_deve->pr_kref, target_pr_kref_release);
                return;
@@ -1759,7 +1759,7 @@ core_scsi3_decode_spec_i_port(
                 * 2nd loop which will never fail.
                 */
                dest_lun = rcu_dereference_check(dest_se_deve->se_lun,
-                               atomic_read(&dest_se_deve->pr_kref.refcount) != 0);
+                               kref_read(&dest_se_deve->pr_kref) != 0);
 
                dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
                                        dest_node_acl, dest_lun, dest_se_deve,
@@ -3466,7 +3466,7 @@ after_iport_check:
                                        iport_ptr);
        if (!dest_pr_reg) {
                struct se_lun *dest_lun = rcu_dereference_check(dest_se_deve->se_lun,
-                               atomic_read(&dest_se_deve->pr_kref.refcount) != 0);
+                               kref_read(&dest_se_deve->pr_kref) != 0);
 
                spin_unlock(&dev->dev_reservation_lock);
                if (core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl,
index 4879e70e2eefb68ddc229effbe4a9822f369ce3f..df7b6e95c019dd91ee56671d93e03a3356fb51b9 100644 (file)
@@ -451,6 +451,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
                                             int *post_ret)
 {
        struct se_device *dev = cmd->se_dev;
+       sense_reason_t ret = TCM_NO_SENSE;
 
        /*
         * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
@@ -458,9 +459,12 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
         * sent to the backend driver.
         */
        spin_lock_irq(&cmd->t_state_lock);
-       if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) {
+       if (cmd->transport_state & CMD_T_SENT) {
                cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
                *post_ret = 1;
+
+               if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
+                       ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
        spin_unlock_irq(&cmd->t_state_lock);
 
@@ -470,7 +474,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
         */
        up(&dev->caw_sem);
 
-       return TCM_NO_SENSE;
+       return ret;
 }
 
 static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
index 1cadc9eefa21a47e783160b874dbd2ce02f8f05f..437591bc7c0855d85102be0ca42d093531004343 100644 (file)
@@ -457,8 +457,20 @@ static void target_complete_nacl(struct kref *kref)
 {
        struct se_node_acl *nacl = container_of(kref,
                                struct se_node_acl, acl_kref);
+       struct se_portal_group *se_tpg = nacl->se_tpg;
 
-       complete(&nacl->acl_free_comp);
+       if (!nacl->dynamic_stop) {
+               complete(&nacl->acl_free_comp);
+               return;
+       }
+
+       mutex_lock(&se_tpg->acl_node_mutex);
+       list_del(&nacl->acl_list);
+       mutex_unlock(&se_tpg->acl_node_mutex);
+
+       core_tpg_wait_for_nacl_pr_ref(nacl);
+       core_free_device_list_for_node(nacl, se_tpg);
+       kfree(nacl);
 }
 
 void target_put_nacl(struct se_node_acl *nacl)
@@ -499,12 +511,39 @@ EXPORT_SYMBOL(transport_deregister_session_configfs);
 void transport_free_session(struct se_session *se_sess)
 {
        struct se_node_acl *se_nacl = se_sess->se_node_acl;
+
        /*
         * Drop the se_node_acl->nacl_kref obtained from within
         * core_tpg_get_initiator_node_acl().
         */
        if (se_nacl) {
+               struct se_portal_group *se_tpg = se_nacl->se_tpg;
+               const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
+               unsigned long flags;
+
                se_sess->se_node_acl = NULL;
+
+               /*
+                * Also determine if we need to drop the extra ->cmd_kref if
+                * it had been previously dynamically generated, and
+                * the endpoint is not caching dynamic ACLs.
+                */
+               mutex_lock(&se_tpg->acl_node_mutex);
+               if (se_nacl->dynamic_node_acl &&
+                   !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
+                       spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
+                       if (list_empty(&se_nacl->acl_sess_list))
+                               se_nacl->dynamic_stop = true;
+                       spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
+
+                       if (se_nacl->dynamic_stop)
+                               list_del(&se_nacl->acl_list);
+               }
+               mutex_unlock(&se_tpg->acl_node_mutex);
+
+               if (se_nacl->dynamic_stop)
+                       target_put_nacl(se_nacl);
+
                target_put_nacl(se_nacl);
        }
        if (se_sess->sess_cmd_map) {
@@ -518,16 +557,12 @@ EXPORT_SYMBOL(transport_free_session);
 void transport_deregister_session(struct se_session *se_sess)
 {
        struct se_portal_group *se_tpg = se_sess->se_tpg;
-       const struct target_core_fabric_ops *se_tfo;
-       struct se_node_acl *se_nacl;
        unsigned long flags;
-       bool drop_nacl = false;
 
        if (!se_tpg) {
                transport_free_session(se_sess);
                return;
        }
-       se_tfo = se_tpg->se_tpg_tfo;
 
        spin_lock_irqsave(&se_tpg->session_lock, flags);
        list_del(&se_sess->sess_list);
@@ -535,33 +570,15 @@ void transport_deregister_session(struct se_session *se_sess)
        se_sess->fabric_sess_ptr = NULL;
        spin_unlock_irqrestore(&se_tpg->session_lock, flags);
 
-       /*
-        * Determine if we need to do extra work for this initiator node's
-        * struct se_node_acl if it had been previously dynamically generated.
-        */
-       se_nacl = se_sess->se_node_acl;
-
-       mutex_lock(&se_tpg->acl_node_mutex);
-       if (se_nacl && se_nacl->dynamic_node_acl) {
-               if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
-                       list_del(&se_nacl->acl_list);
-                       drop_nacl = true;
-               }
-       }
-       mutex_unlock(&se_tpg->acl_node_mutex);
-
-       if (drop_nacl) {
-               core_tpg_wait_for_nacl_pr_ref(se_nacl);
-               core_free_device_list_for_node(se_nacl, se_tpg);
-               se_sess->se_node_acl = NULL;
-               kfree(se_nacl);
-       }
        pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
                se_tpg->se_tpg_tfo->get_fabric_name());
        /*
         * If last kref is dropping now for an explicit NodeACL, awake sleeping
         * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
         * removal context from within transport_free_session() code.
+        *
+        * For dynamic ACL, target_put_nacl() uses target_complete_nacl()
+        * to release all remaining generate_node_acl=1 created ACL resources.
         */
 
        transport_free_session(se_sess);
@@ -3110,7 +3127,6 @@ static void target_tmr_work(struct work_struct *work)
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                goto check_stop;
        }
-       cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
        cmd->se_tfo->queue_tm_rsp(cmd);
@@ -3123,11 +3139,25 @@ int transport_generic_handle_tmr(
        struct se_cmd *cmd)
 {
        unsigned long flags;
+       bool aborted = false;
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
-       cmd->transport_state |= CMD_T_ACTIVE;
+       if (cmd->transport_state & CMD_T_ABORTED) {
+               aborted = true;
+       } else {
+               cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
+               cmd->transport_state |= CMD_T_ACTIVE;
+       }
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
+       if (aborted) {
+               pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d"
+                       "ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function,
+                       cmd->se_tmr_req->ref_task_tag, cmd->tag);
+               transport_cmd_check_stop_to_fabric(cmd);
+               return 0;
+       }
+
        INIT_WORK(&cmd->work, target_tmr_work);
        queue_work(cmd->se_dev->tmr_wq, &cmd->work);
        return 0;
index d828b3b5000bf421826b9823efcbac8d6b2d58a3..cac5a20a4de07ba554151c6099182c9e675efbed 100644 (file)
@@ -864,7 +864,7 @@ out:
                        " CHECK_CONDITION -> sending response\n", rc);
                ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
        }
-       target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
+       target_complete_cmd(ec_cmd, ec_cmd->scsi_status);
 }
 
 sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
index fd5c3de794705bb467f8689540f8045e61629d7f..c91979c1463d95899f909bfe62b9f2d47eb2c1cc 100644 (file)
@@ -454,7 +454,7 @@ static void ft_sess_free(struct kref *kref)
 
 void ft_sess_put(struct ft_sess *sess)
 {
-       int sess_held = atomic_read(&sess->kref.refcount);
+       int sess_held = kref_read(&sess->kref);
 
        BUG_ON(!sess_held);
        kref_put(&sess->kref, ft_sess_free);
index c4a508a124dc2b9dca9e7147c3d8dedc69c5016d..541af5946203bf08e9a2c38a269661e1df81d417 100644 (file)
@@ -58,6 +58,14 @@ static LIST_HEAD(thermal_hwmon_list);
 
 static DEFINE_MUTEX(thermal_hwmon_list_lock);
 
+static ssize_t
+name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct thermal_hwmon_device *hwmon = dev_get_drvdata(dev);
+       return sprintf(buf, "%s\n", hwmon->type);
+}
+static DEVICE_ATTR_RO(name);
+
 static ssize_t
 temp_input_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
@@ -157,12 +165,15 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
 
        INIT_LIST_HEAD(&hwmon->tz_list);
        strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH);
-       hwmon->device = hwmon_device_register_with_info(NULL, hwmon->type,
-                                                       hwmon, NULL, NULL);
+       hwmon->device = hwmon_device_register(NULL);
        if (IS_ERR(hwmon->device)) {
                result = PTR_ERR(hwmon->device);
                goto free_mem;
        }
+       dev_set_drvdata(hwmon->device, hwmon);
+       result = device_create_file(hwmon->device, &dev_attr_name);
+       if (result)
+               goto free_mem;
 
  register_sys_interface:
        temp = kzalloc(sizeof(*temp), GFP_KERNEL);
@@ -211,8 +222,10 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
  free_temp_mem:
        kfree(temp);
  unregister_name:
-       if (new_hwmon_device)
+       if (new_hwmon_device) {
+               device_remove_file(hwmon->device, &dev_attr_name);
                hwmon_device_unregister(hwmon->device);
+       }
  free_mem:
        if (new_hwmon_device)
                kfree(hwmon);
@@ -254,6 +267,7 @@ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
        list_del(&hwmon->node);
        mutex_unlock(&thermal_hwmon_list_lock);
 
+       device_remove_file(hwmon->device, &dev_attr_name);
        hwmon_device_unregister(hwmon->device);
        kfree(hwmon);
 }
index 1bf8ed13f82708d417cfc8ff9d670d46a674ee35..9229de43e19d18822cd23551430fdae41edd3784 100644 (file)
@@ -200,7 +200,6 @@ static struct ld_semaphore __sched *
 down_read_failed(struct ld_semaphore *sem, long count, long timeout)
 {
        struct ldsem_waiter waiter;
-       struct task_struct *tsk = current;
        long adjust = -LDSEM_ACTIVE_BIAS + LDSEM_WAIT_BIAS;
 
        /* set up my own style of waitqueue */
@@ -221,8 +220,8 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout)
        list_add_tail(&waiter.list, &sem->read_wait);
        sem->wait_readers++;
 
-       waiter.task = tsk;
-       get_task_struct(tsk);
+       waiter.task = current;
+       get_task_struct(current);
 
        /* if there are no active locks, wake the new lock owner(s) */
        if ((count & LDSEM_ACTIVE_MASK) == 0)
@@ -232,7 +231,7 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout)
 
        /* wait to be given the lock */
        for (;;) {
-               set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+               set_current_state(TASK_UNINTERRUPTIBLE);
 
                if (!waiter.task)
                        break;
@@ -241,7 +240,7 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout)
                timeout = schedule_timeout(timeout);
        }
 
-       __set_task_state(tsk, TASK_RUNNING);
+       __set_current_state(TASK_RUNNING);
 
        if (!timeout) {
                /* lock timed out but check if this task was just
@@ -268,7 +267,6 @@ static struct ld_semaphore __sched *
 down_write_failed(struct ld_semaphore *sem, long count, long timeout)
 {
        struct ldsem_waiter waiter;
-       struct task_struct *tsk = current;
        long adjust = -LDSEM_ACTIVE_BIAS;
        int locked = 0;
 
@@ -289,16 +287,16 @@ down_write_failed(struct ld_semaphore *sem, long count, long timeout)
 
        list_add_tail(&waiter.list, &sem->write_wait);
 
-       waiter.task = tsk;
+       waiter.task = current;
 
-       set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+       set_current_state(TASK_UNINTERRUPTIBLE);
        for (;;) {
                if (!timeout)
                        break;
                raw_spin_unlock_irq(&sem->wait_lock);
                timeout = schedule_timeout(timeout);
                raw_spin_lock_irq(&sem->wait_lock);
-               set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+               set_current_state(TASK_UNINTERRUPTIBLE);
                locked = writer_trylock(sem);
                if (locked)
                        break;
@@ -309,7 +307,7 @@ down_write_failed(struct ld_semaphore *sem, long count, long timeout)
        list_del(&waiter.list);
        raw_spin_unlock_irq(&sem->wait_lock);
 
-       __set_task_state(tsk, TASK_RUNNING);
+       __set_current_state(TASK_RUNNING);
 
        /* lock wait may have timed out */
        if (!locked)
index d2e50a27140c9254be2a80b6c6ae69bc71a93b4a..24f9f98968a5d860f83920287a5b7deb4c98bed6 100644 (file)
@@ -37,6 +37,10 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* CBM - Flash disk */
        { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* WORLDE easy key (easykey.25) MIDI controller  */
+       { USB_DEVICE(0x0218, 0x0401), .driver_info =
+                       USB_QUIRK_CONFIG_INTF_STRINGS },
+
        /* HP 5300/5370C scanner */
        { USB_DEVICE(0x03f0, 0x0701), .driver_info =
                        USB_QUIRK_STRING_FETCH_255 },
index 5490fc51638ede3c565eff9036ff3beaf884d3a9..e6a17455adac8fcb4797a4b02b8d400ca776ab64 100644 (file)
@@ -2269,6 +2269,8 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
                if (len < sizeof(*d) || h->interface >= ffs->interfaces_count)
                        return -EINVAL;
                length = le32_to_cpu(d->dwSize);
+               if (len < length)
+                       return -EINVAL;
                type = le32_to_cpu(d->dwPropertyDataType);
                if (type < USB_EXT_PROP_UNICODE ||
                    type > USB_EXT_PROP_UNICODE_MULTI) {
@@ -2277,6 +2279,11 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
                        return -EINVAL;
                }
                pnl = le16_to_cpu(d->wPropertyNameLength);
+               if (length < 14 + pnl) {
+                       pr_vdebug("invalid os descriptor length: %d pnl:%d (descriptor %d)\n",
+                                 length, pnl, type);
+                       return -EINVAL;
+               }
                pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl));
                if (length != 14 + pnl + pdl) {
                        pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
@@ -2363,6 +2370,9 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
                }
        }
        if (flags & (1 << i)) {
+               if (len < 4) {
+                       goto error;
+               }
                os_descs_count = get_unaligned_le32(data);
                data += 4;
                len -= 4;
@@ -2435,7 +2445,8 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
 
        ENTER();
 
-       if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
+       if (unlikely(len < 16 ||
+                    get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
                     get_unaligned_le32(data + 4) != len))
                goto error;
        str_count  = get_unaligned_le32(data + 8);
@@ -3687,7 +3698,7 @@ static void ffs_closed(struct ffs_data *ffs)
                goto done;
 
        if (opts->no_configfs || !opts->func_inst.group.cg_item.ci_parent
-           || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount))
+           || !kref_read(&opts->func_inst.group.cg_item.ci_kref))
                goto done;
 
        ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
index 33ff49c4cea428b2a257d63ff52a241a2031e2be..46847340b819b4d40e91ba148e295c00dedfc34a 100644 (file)
@@ -409,7 +409,7 @@ static void __exit mon_exit(void)
                        printk(KERN_ERR TAG
                            ": Outstanding opens (%d) on usb%d, leaking...\n",
                            mbus->nreaders, mbus->u_bus->busnum);
-                       atomic_set(&mbus->ref.refcount, 2);     /* Force leak */
+                       kref_get(&mbus->ref); /* Force leak */
                }
 
                mon_dissolve(mbus, mbus->u_bus);
index fca288bbc8009580ba96198ce1a2a49330074d20..772f1582124255d749ab470a42394498af8f9741 100644 (file)
@@ -594,11 +594,11 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
                                                | MUSB_PORT_STAT_RESUME;
                                musb->rh_timer = jiffies
                                        + msecs_to_jiffies(USB_RESUME_TIMEOUT);
-                               musb->need_finish_resume = 1;
-
                                musb->xceiv->otg->state = OTG_STATE_A_HOST;
                                musb->is_active = 1;
                                musb_host_resume_root_hub(musb);
+                               schedule_delayed_work(&musb->finish_resume_work,
+                                       msecs_to_jiffies(USB_RESUME_TIMEOUT));
                                break;
                        case OTG_STATE_B_WAIT_ACON:
                                musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
@@ -1925,6 +1925,14 @@ static void musb_pm_runtime_check_session(struct musb *musb)
 static void musb_irq_work(struct work_struct *data)
 {
        struct musb *musb = container_of(data, struct musb, irq_work.work);
+       int error;
+
+       error = pm_runtime_get_sync(musb->controller);
+       if (error < 0) {
+               dev_err(musb->controller, "Could not enable: %i\n", error);
+
+               return;
+       }
 
        musb_pm_runtime_check_session(musb);
 
@@ -1932,6 +1940,9 @@ static void musb_irq_work(struct work_struct *data)
                musb->xceiv_old_state = musb->xceiv->otg->state;
                sysfs_notify(&musb->controller->kobj, NULL, "mode");
        }
+
+       pm_runtime_mark_last_busy(musb->controller);
+       pm_runtime_put_autosuspend(musb->controller);
 }
 
 static void musb_recover_from_babble(struct musb *musb)
@@ -2710,11 +2721,6 @@ static int musb_resume(struct device *dev)
        mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV;
        if ((devctl & mask) != (musb->context.devctl & mask))
                musb->port1_status = 0;
-       if (musb->need_finish_resume) {
-               musb->need_finish_resume = 0;
-               schedule_delayed_work(&musb->finish_resume_work,
-                                     msecs_to_jiffies(USB_RESUME_TIMEOUT));
-       }
 
        /*
         * The USB HUB code expects the device to be in RPM_ACTIVE once it came
@@ -2766,12 +2772,6 @@ static int musb_runtime_resume(struct device *dev)
 
        musb_restore_context(musb);
 
-       if (musb->need_finish_resume) {
-               musb->need_finish_resume = 0;
-               schedule_delayed_work(&musb->finish_resume_work,
-                               msecs_to_jiffies(USB_RESUME_TIMEOUT));
-       }
-
        spin_lock_irqsave(&musb->lock, flags);
        error = musb_run_resume_work(musb);
        if (error)
index ade902ea1221e18543de05a5188c715d86af7398..ce5a18c98c6d1134231a29fa9229cf6804a0ae6e 100644 (file)
@@ -410,7 +410,6 @@ struct musb {
 
        /* is_suspended means USB B_PERIPHERAL suspend */
        unsigned                is_suspended:1;
-       unsigned                need_finish_resume :1;
 
        /* may_wakeup means remote wakeup is enabled */
        unsigned                may_wakeup:1;
index 7ce31a4c7e7fd3d186e8e05b20b9a3ca52700b6c..42cc72e54c051b2115c358bcee8bfc534258d206 100644 (file)
@@ -2007,6 +2007,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
index 46fca6b7584686744a9e79aae0bd78db08192813..1db4b61bdf7bd710d7be6e3ff81b97102d76fbe9 100644 (file)
@@ -49,6 +49,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
        { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
        { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
+       { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
        { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
        { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
        { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID_UCSGT) },
index e3b7af8adfb73ccefa92d4ba3c2c927a044dfa43..09d9be88209e1ce6b1f53dc052a53c5e4c491336 100644 (file)
@@ -27,6 +27,7 @@
 #define ATEN_VENDOR_ID         0x0557
 #define ATEN_VENDOR_ID2                0x0547
 #define ATEN_PRODUCT_ID                0x2008
+#define ATEN_PRODUCT_ID2       0x2118
 
 #define IODATA_VENDOR_ID       0x04bb
 #define IODATA_PRODUCT_ID      0x0a03
index 1bc6089b90083a05e0ef3e4ff71c0ef752e3831a..696458db7e3c45e661a9825d05df0fe25dc0a832 100644 (file)
@@ -124,6 +124,7 @@ static const struct usb_device_id id_table[] = {
        {USB_DEVICE(0x1410, 0xa021)},   /* Novatel Gobi 3000 Composite */
        {USB_DEVICE(0x413c, 0x8193)},   /* Dell Gobi 3000 QDL */
        {USB_DEVICE(0x413c, 0x8194)},   /* Dell Gobi 3000 Composite */
+       {USB_DEVICE(0x413c, 0x81a6)},   /* Dell DW5570 QDL (MC8805) */
        {USB_DEVICE(0x1199, 0x68a4)},   /* Sierra Wireless QDL */
        {USB_DEVICE(0x1199, 0x68a5)},   /* Sierra Wireless Modem */
        {USB_DEVICE(0x1199, 0x68a8)},   /* Sierra Wireless QDL */
index c8823578a1b2afd3ae7a36c2f526fd071116876b..59b3f62a2d64ebd85be990630e09a48c7c7f2f1c 100644 (file)
@@ -1123,12 +1123,11 @@ static long tce_iommu_ioctl(void *iommu_data,
                mutex_lock(&container->lock);
 
                ret = tce_iommu_create_default_window(container);
-               if (ret)
-                       return ret;
-
-               ret = tce_iommu_create_window(container, create.page_shift,
-                               create.window_size, create.levels,
-                               &create.start_addr);
+               if (!ret)
+                       ret = tce_iommu_create_window(container,
+                                       create.page_shift,
+                                       create.window_size, create.levels,
+                                       &create.start_addr);
 
                mutex_unlock(&container->lock);
 
@@ -1246,6 +1245,8 @@ static void tce_iommu_release_ownership_ddw(struct tce_container *container,
 static long tce_iommu_take_ownership_ddw(struct tce_container *container,
                struct iommu_table_group *table_group)
 {
+       long i, ret = 0;
+
        if (!table_group->ops->create_table || !table_group->ops->set_window ||
                        !table_group->ops->release_ownership) {
                WARN_ON_ONCE(1);
@@ -1254,7 +1255,27 @@ static long tce_iommu_take_ownership_ddw(struct tce_container *container,
 
        table_group->ops->take_ownership(table_group);
 
+       /* Set all windows to the new group */
+       for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
+               struct iommu_table *tbl = container->tables[i];
+
+               if (!tbl)
+                       continue;
+
+               ret = table_group->ops->set_window(table_group, i, tbl);
+               if (ret)
+                       goto release_exit;
+       }
+
        return 0;
+
+release_exit:
+       for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
+               table_group->ops->unset_window(table_group, i);
+
+       table_group->ops->release_ownership(table_group);
+
+       return ret;
 }
 
 static int tce_iommu_attach_group(void *iommu_data,
@@ -1270,6 +1291,10 @@ static int tce_iommu_attach_group(void *iommu_data,
        /* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
                        iommu_group_id(iommu_group), iommu_group); */
        table_group = iommu_group_get_iommudata(iommu_group);
+       if (!table_group) {
+               ret = -ENODEV;
+               goto unlock_exit;
+       }
 
        if (tce_groups_attached(container) && (!table_group->ops ||
                        !table_group->ops->take_ownership ||
index b3cc33fa6d26a69f44dc013b7bee55f6bd7682ab..bd6f293c4ebd59b5f283ebc1193767c982e1f1ef 100644 (file)
@@ -38,6 +38,8 @@
 #include <linux/workqueue.h>
 #include <linux/mdev.h>
 #include <linux/notifier.h>
+#include <linux/dma-iommu.h>
+#include <linux/irqdomain.h>
 
 #define DRIVER_VERSION  "0.2"
 #define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
@@ -1179,6 +1181,28 @@ static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
        return NULL;
 }
 
+static bool vfio_iommu_has_resv_msi(struct iommu_group *group,
+                                   phys_addr_t *base)
+{
+       struct list_head group_resv_regions;
+       struct iommu_resv_region *region, *next;
+       bool ret = false;
+
+       INIT_LIST_HEAD(&group_resv_regions);
+       iommu_get_group_resv_regions(group, &group_resv_regions);
+       list_for_each_entry(region, &group_resv_regions, list) {
+               if (region->type & IOMMU_RESV_MSI) {
+                       *base = region->start;
+                       ret = true;
+                       goto out;
+               }
+       }
+out:
+       list_for_each_entry_safe(region, next, &group_resv_regions, list)
+               kfree(region);
+       return ret;
+}
+
 static int vfio_iommu_type1_attach_group(void *iommu_data,
                                         struct iommu_group *iommu_group)
 {
@@ -1187,6 +1211,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
        struct vfio_domain *domain, *d;
        struct bus_type *bus = NULL, *mdev_bus;
        int ret;
+       bool resv_msi, msi_remap;
+       phys_addr_t resv_msi_base;
 
        mutex_lock(&iommu->lock);
 
@@ -1256,11 +1282,15 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
        if (ret)
                goto out_domain;
 
+       resv_msi = vfio_iommu_has_resv_msi(iommu_group, &resv_msi_base);
+
        INIT_LIST_HEAD(&domain->group_list);
        list_add(&group->next, &domain->group_list);
 
-       if (!allow_unsafe_interrupts &&
-           !iommu_capable(bus, IOMMU_CAP_INTR_REMAP)) {
+       msi_remap = resv_msi ? irq_domain_check_msi_remap() :
+                               iommu_capable(bus, IOMMU_CAP_INTR_REMAP);
+
+       if (!allow_unsafe_interrupts && !msi_remap) {
                pr_warn("%s: No interrupt remapping support.  Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
                       __func__);
                ret = -EPERM;
@@ -1302,6 +1332,12 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
        if (ret)
                goto out_detach;
 
+       if (resv_msi) {
+               ret = iommu_get_msi_cookie(domain->domain, resv_msi_base);
+               if (ret)
+                       goto out_detach;
+       }
+
        list_add(&domain->next, &iommu->domain_list);
 
        mutex_unlock(&iommu->lock);
index d6432603880c1343ea2451eba6df1973e6d61822..8f99fe08de02e7b48725a99d682055c03056b82a 100644 (file)
@@ -130,14 +130,14 @@ static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
 
 static void vhost_init_is_le(struct vhost_virtqueue *vq)
 {
-       if (vhost_has_feature(vq, VIRTIO_F_VERSION_1))
-               vq->is_le = true;
+       vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
+               || virtio_legacy_is_little_endian();
 }
 #endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
 
 static void vhost_reset_is_le(struct vhost_virtqueue *vq)
 {
-       vq->is_le = virtio_legacy_is_little_endian();
+       vhost_init_is_le(vq);
 }
 
 struct vhost_flush_struct {
@@ -1714,10 +1714,8 @@ int vhost_vq_init_access(struct vhost_virtqueue *vq)
        int r;
        bool is_le = vq->is_le;
 
-       if (!vq->private_data) {
-               vhost_reset_is_le(vq);
+       if (!vq->private_data)
                return 0;
-       }
 
        vhost_init_is_le(vq);
 
index bbbf588540ed71d82ed63deb355b77736a2a9628..ce5e63d2c66aac7d019c422ec294cab025e94e5e 100644 (file)
@@ -373,6 +373,7 @@ static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
 
 static int vhost_vsock_start(struct vhost_vsock *vsock)
 {
+       struct vhost_virtqueue *vq;
        size_t i;
        int ret;
 
@@ -383,19 +384,20 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
                goto err;
 
        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
-               struct vhost_virtqueue *vq = &vsock->vqs[i];
+               vq = &vsock->vqs[i];
 
                mutex_lock(&vq->mutex);
 
                if (!vhost_vq_access_ok(vq)) {
                        ret = -EFAULT;
-                       mutex_unlock(&vq->mutex);
                        goto err_vq;
                }
 
                if (!vq->private_data) {
                        vq->private_data = vsock;
-                       vhost_vq_init_access(vq);
+                       ret = vhost_vq_init_access(vq);
+                       if (ret)
+                               goto err_vq;
                }
 
                mutex_unlock(&vq->mutex);
@@ -405,8 +407,11 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
        return 0;
 
 err_vq:
+       vq->private_data = NULL;
+       mutex_unlock(&vq->mutex);
+
        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
-               struct vhost_virtqueue *vq = &vsock->vqs[i];
+               vq = &vsock->vqs[i];
 
                mutex_lock(&vq->mutex);
                vq->private_data = NULL;
index f89245b8ba8e9a28483c4ff5edb03b80a1a9b2e3..68a113594808f220aa818424cd6e342897806a74 100644 (file)
@@ -163,17 +163,18 @@ void fb_dealloc_cmap(struct fb_cmap *cmap)
 
 int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
 {
-       int tooff = 0, fromoff = 0;
-       int size;
+       unsigned int tooff = 0, fromoff = 0;
+       size_t size;
 
        if (to->start > from->start)
                fromoff = to->start - from->start;
        else
                tooff = from->start - to->start;
-       size = to->len - tooff;
-       if (size > (int) (from->len - fromoff))
-               size = from->len - fromoff;
-       if (size <= 0)
+       if (fromoff >= from->len || tooff >= to->len)
+               return -EINVAL;
+
+       size = min_t(size_t, to->len - tooff, from->len - fromoff);
+       if (size == 0)
                return -EINVAL;
        size *= sizeof(u16);
 
@@ -187,17 +188,18 @@ int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
 
 int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to)
 {
-       int tooff = 0, fromoff = 0;
-       int size;
+       unsigned int tooff = 0, fromoff = 0;
+       size_t size;
 
        if (to->start > from->start)
                fromoff = to->start - from->start;
        else
                tooff = from->start - to->start;
-       size = to->len - tooff;
-       if (size > (int) (from->len - fromoff))
-               size = from->len - fromoff;
-       if (size <= 0)
+       if (fromoff >= from->len || tooff >= to->len)
+               return -EINVAL;
+
+       size = min_t(size_t, to->len - tooff, from->len - fromoff);
+       if (size == 0)
                return -EINVAL;
        size *= sizeof(u16);
 
index d47a2fcef818f3cea1ce7f09160061b3c8d2f0a6..c71fde5fe835c48d1ce4611b29108f8cf7fb44f3 100644 (file)
@@ -59,6 +59,7 @@
 #define pr_fmt(fmt) "virtio-mmio: " fmt
 
 #include <linux/acpi.h>
+#include <linux/dma-mapping.h>
 #include <linux/highmem.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -498,6 +499,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
        struct virtio_mmio_device *vm_dev;
        struct resource *mem;
        unsigned long magic;
+       int rc;
 
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!mem)
@@ -547,9 +549,25 @@ static int virtio_mmio_probe(struct platform_device *pdev)
        }
        vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
 
-       if (vm_dev->version == 1)
+       if (vm_dev->version == 1) {
                writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
 
+               rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+               /*
+                * In the legacy case, ensure our coherently-allocated virtio
+                * ring will be at an address expressable as a 32-bit PFN.
+                */
+               if (!rc)
+                       dma_set_coherent_mask(&pdev->dev,
+                                             DMA_BIT_MASK(32 + PAGE_SHIFT));
+       } else {
+               rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       }
+       if (rc)
+               rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (rc)
+               dev_warn(&pdev->dev, "Failed to enable 64-bit or 32-bit DMA.  Trying to continue, but this might not work.\n");
+
        platform_set_drvdata(pdev, vm_dev);
 
        return register_virtio_device(&vm_dev->vdev);
index f905d6eeb0482ee481cb24d9714bc6081a852d1e..f8afc6dcc29f2769694308092a4b543e5e0bed49 100644 (file)
@@ -414,9 +414,9 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
        if (map == SWIOTLB_MAP_ERROR)
                return DMA_ERROR_CODE;
 
+       dev_addr = xen_phys_to_bus(map);
        xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
                                        dev_addr, map & ~PAGE_MASK, size, dir, attrs);
-       dev_addr = xen_phys_to_bus(map);
 
        /*
         * Ensure that the address returned is DMA'ble
@@ -575,13 +575,14 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                                sg_dma_len(sgl) = 0;
                                return 0;
                        }
+                       dev_addr = xen_phys_to_bus(map);
                        xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
                                                dev_addr,
                                                map & ~PAGE_MASK,
                                                sg->length,
                                                dir,
                                                attrs);
-                       sg->dma_address = xen_phys_to_bus(map);
+                       sg->dma_address = dev_addr;
                } else {
                        /* we are not interested in the dma_addr returned by
                         * xen_dma_map_page, only in the potential cache flushes executed
index c2a377cdda2b03d6efe8768e4ef7894a06ebe853..83eab52fb3f69a75aa06a9f2a31760a384508f41 100644 (file)
@@ -38,6 +38,7 @@ config FS_DAX
        bool "Direct Access (DAX) support"
        depends on MMU
        depends on !(ARM || MIPS || SPARC)
+       select FS_IOMAP
        help
          Direct Access (DAX) can be used on memory-backed block devices.
          If the block device supports DAX and the filesystem supports DAX,
index 422370293cfd8cfc9b0f527f73498ce7c28244be..e7bf01373bc4c63b4ab6d27f4a9b2f9b8f7fed02 100644 (file)
@@ -1428,17 +1428,18 @@ static void fill_prstatus(struct elf_prstatus *prstatus,
                 * group-wide total, not its individual thread total.
                 */
                thread_group_cputime(p, &cputime);
-               cputime_to_timeval(cputime.utime, &prstatus->pr_utime);
-               cputime_to_timeval(cputime.stime, &prstatus->pr_stime);
+               prstatus->pr_utime = ns_to_timeval(cputime.utime);
+               prstatus->pr_stime = ns_to_timeval(cputime.stime);
        } else {
-               cputime_t utime, stime;
+               u64 utime, stime;
 
                task_cputime(p, &utime, &stime);
-               cputime_to_timeval(utime, &prstatus->pr_utime);
-               cputime_to_timeval(stime, &prstatus->pr_stime);
+               prstatus->pr_utime = ns_to_timeval(utime);
+               prstatus->pr_stime = ns_to_timeval(stime);
        }
-       cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
-       cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
+
+       prstatus->pr_cutime = ns_to_timeval(p->signal->cutime);
+       prstatus->pr_cstime = ns_to_timeval(p->signal->cstime);
 }
 
 static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
index d2e36f82c35d3ac5d96e00b22392926bdd1990e9..ffca4bbc3d63a197a043dba1d391864a8a926b08 100644 (file)
@@ -1349,17 +1349,17 @@ static void fill_prstatus(struct elf_prstatus *prstatus,
                 * group-wide total, not its individual thread total.
                 */
                thread_group_cputime(p, &cputime);
-               cputime_to_timeval(cputime.utime, &prstatus->pr_utime);
-               cputime_to_timeval(cputime.stime, &prstatus->pr_stime);
+               prstatus->pr_utime = ns_to_timeval(cputime.utime);
+               prstatus->pr_stime = ns_to_timeval(cputime.stime);
        } else {
-               cputime_t utime, stime;
+               u64 utime, stime;
 
                task_cputime(p, &utime, &stime);
-               cputime_to_timeval(utime, &prstatus->pr_utime);
-               cputime_to_timeval(stime, &prstatus->pr_stime);
+               prstatus->pr_utime = ns_to_timeval(utime);
+               prstatus->pr_stime = ns_to_timeval(stime);
        }
-       cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
-       cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
+       prstatus->pr_cutime = ns_to_timeval(p->signal->cutime);
+       prstatus->pr_cstime = ns_to_timeval(p->signal->cstime);
 
        prstatus->pr_exec_fdpic_loadmap = p->mm->context.exec_fdpic_loadmap;
        prstatus->pr_interp_fdpic_loadmap = p->mm->context.interp_fdpic_loadmap;
index 5db5d1340d69eccf475f0feac7f85665bd6aceb5..3c47614a4b32c75c4d5cf75d436389305c145288 100644 (file)
@@ -331,7 +331,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
        struct blk_plug plug;
        struct blkdev_dio *dio;
        struct bio *bio;
-       bool is_read = (iov_iter_rw(iter) == READ);
+       bool is_read = (iov_iter_rw(iter) == READ), is_sync;
        loff_t pos = iocb->ki_pos;
        blk_qc_t qc = BLK_QC_T_NONE;
        int ret;
@@ -344,7 +344,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
        bio_get(bio); /* extra ref for the completion handler */
 
        dio = container_of(bio, struct blkdev_dio, bio);
-       dio->is_sync = is_sync_kiocb(iocb);
+       dio->is_sync = is_sync = is_sync_kiocb(iocb);
        if (dio->is_sync)
                dio->waiter = current;
        else
@@ -398,7 +398,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
        }
        blk_finish_plug(&plug);
 
-       if (!dio->is_sync)
+       if (!is_sync)
                return -EIOCBQUEUED;
 
        for (;;) {
index 7f390849343b3e42b9399c2ac58b948c87944902..c4444d6f439f676cee59a20322d0f88fcb3cc4a3 100644 (file)
@@ -1024,6 +1024,7 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
        unsigned long buf_offset;
        unsigned long current_buf_start;
        unsigned long start_byte;
+       unsigned long prev_start_byte;
        unsigned long working_bytes = total_out - buf_start;
        unsigned long bytes;
        char *kaddr;
@@ -1071,26 +1072,34 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
                if (!bio->bi_iter.bi_size)
                        return 0;
                bvec = bio_iter_iovec(bio, bio->bi_iter);
-
+               prev_start_byte = start_byte;
                start_byte = page_offset(bvec.bv_page) - disk_start;
 
                /*
-                * make sure our new page is covered by this
-                * working buffer
+                * We need to make sure we're only adjusting
+                * our offset into compression working buffer when
+                * we're switching pages.  Otherwise we can incorrectly
+                * keep copying when we were actually done.
                 */
-               if (total_out <= start_byte)
-                       return 1;
+               if (start_byte != prev_start_byte) {
+                       /*
+                        * make sure our new page is covered by this
+                        * working buffer
+                        */
+                       if (total_out <= start_byte)
+                               return 1;
 
-               /*
-                * the next page in the biovec might not be adjacent
-                * to the last page, but it might still be found
-                * inside this working buffer. bump our offset pointer
-                */
-               if (total_out > start_byte &&
-                   current_buf_start < start_byte) {
-                       buf_offset = start_byte - buf_start;
-                       working_bytes = total_out - start_byte;
-                       current_buf_start = buf_start + buf_offset;
+                       /*
+                        * the next page in the biovec might not be adjacent
+                        * to the last page, but it might still be found
+                        * inside this working buffer. bump our offset pointer
+                        */
+                       if (total_out > start_byte &&
+                           current_buf_start < start_byte) {
+                               buf_offset = start_byte - buf_start;
+                               working_bytes = total_out - start_byte;
+                               current_buf_start = buf_start + buf_offset;
+                       }
                }
        }
 
index 4e024260ad713ffc583d8f2ffaeb7ba6a025014d..1e861a063721e7c173a770cd68d0fea484cb75b3 100644 (file)
@@ -3835,10 +3835,7 @@ cache_acl:
                break;
        case S_IFDIR:
                inode->i_fop = &btrfs_dir_file_operations;
-               if (root == fs_info->tree_root)
-                       inode->i_op = &btrfs_dir_ro_inode_operations;
-               else
-                       inode->i_op = &btrfs_dir_inode_operations;
+               inode->i_op = &btrfs_dir_inode_operations;
                break;
        case S_IFLNK:
                inode->i_op = &btrfs_symlink_inode_operations;
@@ -4505,8 +4502,19 @@ search_again:
                if (found_type > min_type) {
                        del_item = 1;
                } else {
-                       if (item_end < new_size)
+                       if (item_end < new_size) {
+                               /*
+                                * With NO_HOLES mode, for the following mapping
+                                *
+                                * [0-4k][hole][8k-12k]
+                                *
+                                * if truncating isize down to 6k, it ends up
+                                * isize being 8k.
+                                */
+                               if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
+                                       last_size = new_size;
                                break;
+                       }
                        if (found_key.offset >= new_size)
                                del_item = 1;
                        else
@@ -5710,6 +5718,7 @@ static struct inode *new_simple_dir(struct super_block *s,
 
        inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
        inode->i_op = &btrfs_dir_ro_inode_operations;
+       inode->i_opflags &= ~IOP_XATTR;
        inode->i_fop = &simple_dir_operations;
        inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
        inode->i_mtime = current_time(inode);
@@ -7215,7 +7224,6 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
        struct extent_map *em = NULL;
        int ret;
 
-       down_read(&BTRFS_I(inode)->dio_sem);
        if (type != BTRFS_ORDERED_NOCOW) {
                em = create_pinned_em(inode, start, len, orig_start,
                                      block_start, block_len, orig_block_len,
@@ -7234,7 +7242,6 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
                em = ERR_PTR(ret);
        }
  out:
-       up_read(&BTRFS_I(inode)->dio_sem);
 
        return em;
 }
@@ -8692,6 +8699,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
                dio_data.unsubmitted_oe_range_start = (u64)offset;
                dio_data.unsubmitted_oe_range_end = (u64)offset;
                current->journal_info = &dio_data;
+               down_read(&BTRFS_I(inode)->dio_sem);
        } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
                                     &BTRFS_I(inode)->runtime_flags)) {
                inode_dio_end(inode);
@@ -8704,6 +8712,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
                                   iter, btrfs_get_blocks_direct, NULL,
                                   btrfs_submit_direct, flags);
        if (iov_iter_rw(iter) == WRITE) {
+               up_read(&BTRFS_I(inode)->dio_sem);
                current->journal_info = NULL;
                if (ret < 0 && ret != -EIOCBQUEUED) {
                        if (dio_data.reserve)
@@ -9212,6 +9221,7 @@ static int btrfs_truncate(struct inode *inode)
                        break;
                }
 
+               btrfs_block_rsv_release(fs_info, rsv, -1);
                ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
                                              rsv, min_size, 0);
                BUG_ON(ret);    /* shouldn't happen */
@@ -10579,8 +10589,6 @@ static const struct inode_operations btrfs_dir_inode_operations = {
 static const struct inode_operations btrfs_dir_ro_inode_operations = {
        .lookup         = btrfs_lookup,
        .permission     = btrfs_permission,
-       .get_acl        = btrfs_get_acl,
-       .set_acl        = btrfs_set_acl,
        .update_time    = btrfs_update_time,
 };
 
index 33f967d30b2ad1d555015baea2bccc5eba05d1d5..21e51b0ba188a37be6b71cab9218a8b9457b86f8 100644 (file)
@@ -5653,6 +5653,10 @@ long btrfs_ioctl(struct file *file, unsigned int
 #ifdef CONFIG_COMPAT
 long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
+       /*
+        * These all access 32-bit values anyway so no further
+        * handling is necessary.
+        */
        switch (cmd) {
        case FS_IOC32_GETFLAGS:
                cmd = FS_IOC_GETFLAGS;
@@ -5663,8 +5667,6 @@ long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        case FS_IOC32_GETVERSION:
                cmd = FS_IOC_GETVERSION;
                break;
-       default:
-               return -ENOIOCTLCMD;
        }
 
        return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
index 8f6a2a5863b9d9275bfb6afb00fc16b867101275..a27fc8791551cc86ca14d2d65e7870990a393f1e 100644 (file)
@@ -285,6 +285,7 @@ initiate_cifs_search(const unsigned int xid, struct file *file)
                        rc = -ENOMEM;
                        goto error_exit;
                }
+               spin_lock_init(&cifsFile->file_info_lock);
                file->private_data = cifsFile;
                cifsFile->tlink = cifs_get_tlink(tlink);
                tcon = tlink_tcon(tlink);
index 4d24d17bcfc1dc3ecd917f12f2a889da1a506e24..504b3c3539dceb07ddb0845b88905048fd3f23d7 100644 (file)
 #define elf_prstatus   compat_elf_prstatus
 #define elf_prpsinfo   compat_elf_prpsinfo
 
-/*
- * Compat version of cputime_to_compat_timeval, perhaps this
- * should be an inline in <linux/compat.h>.
- */
-static void cputime_to_compat_timeval(const cputime_t cputime,
-                                     struct compat_timeval *value)
-{
-       struct timeval tv;
-       cputime_to_timeval(cputime, &tv);
-       value->tv_sec = tv.tv_sec;
-       value->tv_usec = tv.tv_usec;
-}
-
-#undef cputime_to_timeval
-#define cputime_to_timeval cputime_to_compat_timeval
-
+#undef ns_to_timeval
+#define ns_to_timeval ns_to_compat_timeval
 
 /*
  * To use this file, asm/elf.h must define compat_elf_check_arch.
index ddcddfeaa03bd942e83738d34c4abaed06fa2709..c45598b912e14c981fdeb002b01c1535218c0ff2 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -990,7 +990,6 @@ int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
 }
 EXPORT_SYMBOL_GPL(__dax_zero_page_range);
 
-#ifdef CONFIG_FS_IOMAP
 static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
 {
        return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
@@ -1032,6 +1031,11 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                struct blk_dax_ctl dax = { 0 };
                ssize_t map_len;
 
+               if (fatal_signal_pending(current)) {
+                       ret = -EINTR;
+                       break;
+               }
+
                dax.sector = dax_iomap_sector(iomap, pos);
                dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
                map_len = dax_map_atomic(iomap->bdev, &dax);
@@ -1428,4 +1432,3 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 }
 EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault);
 #endif /* CONFIG_FS_DAX_PMD */
-#endif /* CONFIG_FS_IOMAP */
index 5e6a2c0a1f0b3eb78ed88d124a56b24453f9e790..1f7d5e46cdda12c20a66403b05e5524a9a3515f0 100644 (file)
@@ -122,7 +122,7 @@ void exofs_sysfs_dbg_print(void)
        list_for_each_entry_safe(k_name, k_tmp, &exofs_kset->list, entry) {
                printk(KERN_INFO "%s: name %s ref %d\n",
                        __func__, kobject_name(k_name),
-                       (int)atomic_read(&k_name->kref.refcount));
+                       (int)kref_read(&k_name->kref));
        }
 #endif
 }
index 36bea5adcabaa735056b20290ce23cdc7dfc0851..c634874e12d969fbd0b00ad8da745553168876e6 100644 (file)
@@ -1,6 +1,5 @@
 config EXT2_FS
        tristate "Second extended fs support"
-       select FS_IOMAP if FS_DAX
        help
          Ext2 is a standard Linux file system for hard disks.
 
index 7b90691e98c4f5fdd3b2d162285b00e71ac51b63..e38039fd96ff59ab59ce17407abcf26de4c5a950 100644 (file)
@@ -37,7 +37,6 @@ config EXT4_FS
        select CRC16
        select CRYPTO
        select CRYPTO_CRC32C
-       select FS_IOMAP if FS_DAX
        help
          This is the next generation of the ext3 filesystem.
 
index 4304072161aa08c14d24291bf24eb2481c567874..40d61077bead88e39abff93bcdb185941462bfcb 100644 (file)
@@ -542,6 +542,7 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
                hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
                        if (invalidate)
                                set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
+                       clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
                        fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
                }
        } else {
@@ -560,6 +561,10 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
                wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t,
                                 TASK_UNINTERRUPTIBLE);
 
+       /* Make sure any pending writes are cancelled. */
+       if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX)
+               fscache_invalidate_writes(cookie);
+
        /* Reset the cookie state if it wasn't relinquished */
        if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) {
                atomic_inc(&cookie->n_active);
index 9b28649df3a1fdc6f0f0c23b58b03db94dd69eb2..a8aa00be44442f59d6cf08516ab7403d0c02ab9e 100644 (file)
@@ -48,6 +48,7 @@ int __fscache_register_netfs(struct fscache_netfs *netfs)
        cookie->flags           = 1 << FSCACHE_COOKIE_ENABLED;
 
        spin_lock_init(&cookie->lock);
+       spin_lock_init(&cookie->stores_lock);
        INIT_HLIST_HEAD(&cookie->backing_objects);
 
        /* check the netfs type is not already present */
index 9e792e30f4db47b38c6db644487c440a2e12febb..7a182c87f37805f1a5fa6719f5cc06cf3dd38552 100644 (file)
@@ -30,6 +30,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
 static const struct fscache_state *fscache_object_available(struct fscache_object *, int);
 static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int);
 static const struct fscache_state *fscache_update_object(struct fscache_object *, int);
+static const struct fscache_state *fscache_object_dead(struct fscache_object *, int);
 
 #define __STATE_NAME(n) fscache_osm_##n
 #define STATE(n) (&__STATE_NAME(n))
@@ -91,7 +92,7 @@ static WORK_STATE(LOOKUP_FAILURE,     "LCFL", fscache_lookup_failure);
 static WORK_STATE(KILL_OBJECT,         "KILL", fscache_kill_object);
 static WORK_STATE(KILL_DEPENDENTS,     "KDEP", fscache_kill_dependents);
 static WORK_STATE(DROP_OBJECT,         "DROP", fscache_drop_object);
-static WORK_STATE(OBJECT_DEAD,         "DEAD", (void*)2UL);
+static WORK_STATE(OBJECT_DEAD,         "DEAD", fscache_object_dead);
 
 static WAIT_STATE(WAIT_FOR_INIT,       "?INI",
                  TRANSIT_TO(INIT_OBJECT,       1 << FSCACHE_OBJECT_EV_NEW_CHILD));
@@ -229,6 +230,10 @@ execute_work_state:
        event = -1;
        if (new_state == NO_TRANSIT) {
                _debug("{OBJ%x} %s notrans", object->debug_id, state->name);
+               if (unlikely(state == STATE(OBJECT_DEAD))) {
+                       _leave(" [dead]");
+                       return;
+               }
                fscache_enqueue_object(object);
                event_mask = object->oob_event_mask;
                goto unmask_events;
@@ -239,7 +244,7 @@ execute_work_state:
        object->state = state = new_state;
 
        if (state->work) {
-               if (unlikely(state->work == ((void *)2UL))) {
+               if (unlikely(state == STATE(OBJECT_DEAD))) {
                        _leave(" [dead]");
                        return;
                }
@@ -645,6 +650,12 @@ static const struct fscache_state *fscache_kill_object(struct fscache_object *ob
        fscache_mark_object_dead(object);
        object->oob_event_mask = 0;
 
+       if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) {
+               /* Reject any new read/write ops and abort any that are pending. */
+               clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
+               fscache_cancel_all_ops(object);
+       }
+
        if (list_empty(&object->dependents) &&
            object->n_ops == 0 &&
            object->n_children == 0)
@@ -1077,3 +1088,20 @@ void fscache_object_mark_killed(struct fscache_object *object,
        }
 }
 EXPORT_SYMBOL(fscache_object_mark_killed);
+
+/*
+ * The object is dead.  We can get here if an object gets queued by an event
+ * that would lead to its death (such as EV_KILL) when the dispatcher is
+ * already running (and so can be requeued) but hasn't yet cleared the event
+ * mask.
+ */
+static const struct fscache_state *fscache_object_dead(struct fscache_object *object,
+                                                      int event)
+{
+       if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD,
+                             &object->flags))
+               return NO_TRANSIT;
+
+       WARN(true, "FS-Cache object redispatched after death");
+       return NO_TRANSIT;
+}
index 4e06a27ed7f80d4d0472e3d6c9e9fe3f0f1d7da5..f11792672977d2280ec7fc301f81e9b1b1ba7741 100644 (file)
@@ -399,6 +399,10 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
 {
        spin_lock(&fiq->waitq.lock);
+       if (test_bit(FR_FINISHED, &req->flags)) {
+               spin_unlock(&fiq->waitq.lock);
+               return;
+       }
        if (list_empty(&req->intr_entry)) {
                list_add_tail(&req->intr_entry, &fiq->interrupts);
                wake_up_locked(&fiq->waitq);
@@ -1372,6 +1376,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
                 * code can Oops if the buffer persists after module unload.
                 */
                bufs[page_nr].ops = &nosteal_pipe_buf_ops;
+               bufs[page_nr].flags = 0;
                ret = add_to_pipe(pipe, &bufs[page_nr++]);
                if (unlikely(ret < 0))
                        break;
index 91307940c8ac5e921b08133a04ca0b65283fd308..052f8d3c41cb040405a63248ffebc613dcc893e6 100644 (file)
@@ -256,7 +256,7 @@ struct fuse_io_priv {
 
 #define FUSE_IO_PRIV_SYNC(f) \
 {                                      \
-       .refcnt = { ATOMIC_INIT(1) },   \
+       .refcnt = KREF_INIT(1),         \
        .async = 0,                     \
        .file = f,                      \
 }
index 354a123f170e534a016f74ca7006458e3b823ef8..a51cb4c07d4d8cd3a09715361c84126cecae2ca2 100644 (file)
@@ -114,6 +114,9 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
 
        BUG_ON(pos + len > iomap->offset + iomap->length);
 
+       if (fatal_signal_pending(current))
+               return -EINTR;
+
        page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
        if (!page)
                return -ENOMEM;
index 8c514367ba5acb460d293fd6007e6479b7ceae0f..b6b194ec1b4f9afbb4b9ced950d03ac0018aaa35 100644 (file)
@@ -393,7 +393,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
        /* Do we need to erase the effects of a prior jbd2_journal_flush? */
        if (journal->j_flags & JBD2_FLUSHED) {
                jbd_debug(3, "super block updated\n");
-               mutex_lock(&journal->j_checkpoint_mutex);
+               mutex_lock_io(&journal->j_checkpoint_mutex);
                /*
                 * We hold j_checkpoint_mutex so tail cannot change under us.
                 * We don't need any special data guarantees for writing sb
index a097048ed1a3a0315a36c36befb13e78166a1db4..d8a5d0a08f0758c5c3e6e9e4a18a772b706e2646 100644 (file)
@@ -944,7 +944,7 @@ out:
  */
 void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
 {
-       mutex_lock(&journal->j_checkpoint_mutex);
+       mutex_lock_io(&journal->j_checkpoint_mutex);
        if (tid_gt(tid, journal->j_tail_sequence))
                __jbd2_update_log_tail(journal, tid, block);
        mutex_unlock(&journal->j_checkpoint_mutex);
@@ -1304,7 +1304,7 @@ static int journal_reset(journal_t *journal)
                journal->j_flags |= JBD2_FLUSHED;
        } else {
                /* Lock here to make assertions happy... */
-               mutex_lock(&journal->j_checkpoint_mutex);
+               mutex_lock_io(&journal->j_checkpoint_mutex);
                /*
                 * Update log tail information. We use REQ_FUA since new
                 * transaction will start reusing journal space and so we
@@ -1691,7 +1691,7 @@ int jbd2_journal_destroy(journal_t *journal)
        spin_lock(&journal->j_list_lock);
        while (journal->j_checkpoint_transactions != NULL) {
                spin_unlock(&journal->j_list_lock);
-               mutex_lock(&journal->j_checkpoint_mutex);
+               mutex_lock_io(&journal->j_checkpoint_mutex);
                err = jbd2_log_do_checkpoint(journal);
                mutex_unlock(&journal->j_checkpoint_mutex);
                /*
@@ -1713,7 +1713,7 @@ int jbd2_journal_destroy(journal_t *journal)
 
        if (journal->j_sb_buffer) {
                if (!is_journal_aborted(journal)) {
-                       mutex_lock(&journal->j_checkpoint_mutex);
+                       mutex_lock_io(&journal->j_checkpoint_mutex);
 
                        write_lock(&journal->j_state_lock);
                        journal->j_tail_sequence =
@@ -1955,7 +1955,7 @@ int jbd2_journal_flush(journal_t *journal)
        spin_lock(&journal->j_list_lock);
        while (!err && journal->j_checkpoint_transactions != NULL) {
                spin_unlock(&journal->j_list_lock);
-               mutex_lock(&journal->j_checkpoint_mutex);
+               mutex_lock_io(&journal->j_checkpoint_mutex);
                err = jbd2_log_do_checkpoint(journal);
                mutex_unlock(&journal->j_checkpoint_mutex);
                spin_lock(&journal->j_list_lock);
@@ -1965,7 +1965,7 @@ int jbd2_journal_flush(journal_t *journal)
        if (is_journal_aborted(journal))
                return -EIO;
 
-       mutex_lock(&journal->j_checkpoint_mutex);
+       mutex_lock_io(&journal->j_checkpoint_mutex);
        if (!err) {
                err = jbd2_cleanup_journal_tail(journal);
                if (err < 0) {
index ecc151697fd4bd81288941848a4b6a76e7563a24..0a0eaecf967683f66461874408a1f572c4b154f1 100644 (file)
@@ -2700,7 +2700,8 @@ static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
                sattr->ia_valid |= ATTR_MTIME;
 
        /* Except MODE, it seems harmless of setting twice. */
-       if ((attrset[1] & FATTR4_WORD1_MODE))
+       if (opendata->o_arg.createmode != NFS4_CREATE_EXCLUSIVE &&
+               attrset[1] & FATTR4_WORD1_MODE)
                sattr->ia_valid &= ~ATTR_MODE;
 
        if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL)
@@ -8490,6 +8491,7 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
                goto out;
        }
 
+       nfs4_sequence_free_slot(&lgp->res.seq_res);
        err = nfs4_handle_exception(server, nfs4err, exception);
        if (!status) {
                if (exception->retry)
index 90e6193ce6bed300ddadb1aa1a9aca64a8c2aafc..daeb94e3acd49bd6553601bd15adb8f72236825b 100644 (file)
@@ -1091,6 +1091,7 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
                case -NFS4ERR_BADXDR:
                case -NFS4ERR_RESOURCE:
                case -NFS4ERR_NOFILEHANDLE:
+               case -NFS4ERR_MOVED:
                        /* Non-seqid mutating errors */
                        return;
        };
index 59554f3adf2948a10dd945b5f8441c236f53e9f2..dd042498ce7c67df24b4919c2e98dc8f48db13ca 100644 (file)
@@ -1200,10 +1200,10 @@ _pnfs_return_layout(struct inode *ino)
 
        send = pnfs_prepare_layoutreturn(lo, &stateid, NULL);
        spin_unlock(&ino->i_lock);
-       pnfs_free_lseg_list(&tmp_list);
        if (send)
                status = pnfs_send_layoutreturn(lo, &stateid, IOMODE_ANY, true);
 out_put_layout_hdr:
+       pnfs_free_lseg_list(&tmp_list);
        pnfs_put_layout_hdr(lo);
 out:
        dprintk("<-- %s status: %d\n", __func__, status);
index 596205d939a1f43f1faa292d31680377045a8589..1fc07a9c70e9c6028342e8c97d183dfe914a343b 100644 (file)
@@ -223,10 +223,11 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
        struct nfs4_layout_stateid *ls;
        struct nfs4_stid *stp;
 
-       stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache);
+       stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache,
+                                       nfsd4_free_layout_stateid);
        if (!stp)
                return NULL;
-       stp->sc_free = nfsd4_free_layout_stateid;
+
        get_nfs4_file(fp);
        stp->sc_file = fp;
 
index 4b4beaaa4eaac01233f874c7dfdb8d1a6d7cd3d6..a0dee8ae9f97f16a18e40ba19f8e84a45ad1a02b 100644 (file)
@@ -633,8 +633,8 @@ out:
        return co;
 }
 
-struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
-                                        struct kmem_cache *slab)
+struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
+                                 void (*sc_free)(struct nfs4_stid *))
 {
        struct nfs4_stid *stid;
        int new_id;
@@ -650,6 +650,8 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
        idr_preload_end();
        if (new_id < 0)
                goto out_free;
+
+       stid->sc_free = sc_free;
        stid->sc_client = cl;
        stid->sc_stateid.si_opaque.so_id = new_id;
        stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
@@ -675,15 +677,12 @@ out_free:
 static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
 {
        struct nfs4_stid *stid;
-       struct nfs4_ol_stateid *stp;
 
-       stid = nfs4_alloc_stid(clp, stateid_slab);
+       stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
        if (!stid)
                return NULL;
 
-       stp = openlockstateid(stid);
-       stp->st_stid.sc_free = nfs4_free_ol_stateid;
-       return stp;
+       return openlockstateid(stid);
 }
 
 static void nfs4_free_deleg(struct nfs4_stid *stid)
@@ -781,11 +780,10 @@ alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh,
                goto out_dec;
        if (delegation_blocked(&current_fh->fh_handle))
                goto out_dec;
-       dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
+       dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
        if (dp == NULL)
                goto out_dec;
 
-       dp->dl_stid.sc_free = nfs4_free_deleg;
        /*
         * delegation seqid's are never incremented.  The 4.1 special
         * meaning of seqid 0 isn't meaningful, really, but let's avoid
@@ -5580,7 +5578,6 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
        stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
        get_nfs4_file(fp);
        stp->st_stid.sc_file = fp;
-       stp->st_stid.sc_free = nfs4_free_lock_stateid;
        stp->st_access_bmap = 0;
        stp->st_deny_bmap = open_stp->st_deny_bmap;
        stp->st_openstp = open_stp;
@@ -5623,7 +5620,7 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
        lst = find_lock_stateid(lo, fi);
        if (lst == NULL) {
                spin_unlock(&clp->cl_lock);
-               ns = nfs4_alloc_stid(clp, stateid_slab);
+               ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
                if (ns == NULL)
                        return NULL;
 
index c9399366f9dfc73b343d079fbad2dc2127927aae..4516e8b7d776305d94fb89f86256ee3fc54dec27 100644 (file)
@@ -603,8 +603,8 @@ extern __be32 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
 __be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
                     stateid_t *stateid, unsigned char typemask,
                     struct nfs4_stid **s, struct nfsd_net *nn);
-struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
-               struct kmem_cache *slab);
+struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
+                                 void (*sc_free)(struct nfs4_stid *));
 void nfs4_unhash_stid(struct nfs4_stid *s);
 void nfs4_put_stid(struct nfs4_stid *s);
 void nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid);
index 27d1242c8383ba6a6962749cc4443d2feef77c93..564c504d6efd8c99f903e95d96a87b47ce7db36a 100644 (file)
@@ -349,7 +349,7 @@ static void sc_show_sock_container(struct seq_file *seq,
                   "  func key:        0x%08x\n"
                   "  func type:       %u\n",
                   sc,
-                  atomic_read(&sc->sc_kref.refcount),
+                  kref_read(&sc->sc_kref),
                   &saddr, inet ? ntohs(sport) : 0,
                   &daddr, inet ? ntohs(dport) : 0,
                   sc->sc_node->nd_name,
index d4b5c81f0445992294dc33b523ab21025940ea76..ec000575e8634333c32f56142d203b74e4ace45a 100644 (file)
@@ -97,7 +97,7 @@
        typeof(sc) __sc = (sc);                                         \
        mlog(ML_SOCKET, "[sc %p refs %d sock %p node %u page %p "       \
             "pg_off %zu] " fmt, __sc,                                  \
-            atomic_read(&__sc->sc_kref.refcount), __sc->sc_sock,       \
+            kref_read(&__sc->sc_kref), __sc->sc_sock,  \
            __sc->sc_node->nd_num, __sc->sc_page, __sc->sc_page_off ,   \
            ##args);                                                    \
 } while (0)
index e7b760deefaee65402416e187ad4e2b40fc13e62..9b984cae4c4e0dc6f7b85b02baba6cadd11ac0a4 100644 (file)
@@ -81,7 +81,7 @@ static void __dlm_print_lock(struct dlm_lock *lock)
               lock->ml.type, lock->ml.convert_type, lock->ml.node,
               dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
               dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
-              atomic_read(&lock->lock_refs.refcount),
+              kref_read(&lock->lock_refs),
               (list_empty(&lock->ast_list) ? 'y' : 'n'),
               (lock->ast_pending ? 'y' : 'n'),
               (list_empty(&lock->bast_list) ? 'y' : 'n'),
@@ -106,7 +106,7 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
        printk("lockres: %s, owner=%u, state=%u\n",
               buf, res->owner, res->state);
        printk("  last used: %lu, refcnt: %u, on purge list: %s\n",
-              res->last_used, atomic_read(&res->refs.refcount),
+              res->last_used, kref_read(&res->refs),
               list_empty(&res->purge) ? "no" : "yes");
        printk("  on dirty list: %s, on reco list: %s, "
               "migrating pending: %s\n",
@@ -298,7 +298,7 @@ static int dump_mle(struct dlm_master_list_entry *mle, char *buf, int len)
                        mle_type, mle->master, mle->new_master,
                        !list_empty(&mle->hb_events),
                        !!mle->inuse,
-                       atomic_read(&mle->mle_refs.refcount));
+                       kref_read(&mle->mle_refs));
 
        out += snprintf(buf + out, len - out, "Maybe=");
        out += stringify_nodemap(mle->maybe_map, O2NM_MAX_NODES,
@@ -494,7 +494,7 @@ static int dump_lock(struct dlm_lock *lock, int list_type, char *buf, int len)
                       lock->ast_pending, lock->bast_pending,
                       lock->convert_pending, lock->lock_pending,
                       lock->cancel_pending, lock->unlock_pending,
-                      atomic_read(&lock->lock_refs.refcount));
+                      kref_read(&lock->lock_refs));
        spin_unlock(&lock->spinlock);
 
        return out;
@@ -521,7 +521,7 @@ static int dump_lockres(struct dlm_lock_resource *res, char *buf, int len)
                        !list_empty(&res->recovering),
                        res->inflight_locks, res->migration_pending,
                        atomic_read(&res->asts_reserved),
-                       atomic_read(&res->refs.refcount));
+                       kref_read(&res->refs));
 
        /* refmap */
        out += snprintf(buf + out, len - out, "RMAP:");
@@ -777,7 +777,7 @@ static int debug_state_print(struct dlm_ctxt *dlm, char *buf, int len)
        /* Purge Count: xxx  Refs: xxx */
        out += snprintf(buf + out, len - out,
                        "Purge Count: %d  Refs: %d\n", dlm->purge_count,
-                       atomic_read(&dlm->dlm_refs.refcount));
+                       kref_read(&dlm->dlm_refs));
 
        /* Dead Node: xxx */
        out += snprintf(buf + out, len - out,
index 733e4e79c8e25f02e3a3af1ad562049d32742cbc..32fd261ae13d02ce0844285b3cb8c7b529f0a3ab 100644 (file)
@@ -2072,7 +2072,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
        INIT_LIST_HEAD(&dlm->dlm_eviction_callbacks);
 
        mlog(0, "context init: refcount %u\n",
-                 atomic_read(&dlm->dlm_refs.refcount));
+                 kref_read(&dlm->dlm_refs));
 
 leave:
        if (ret < 0 && dlm) {
index a464c8088170aa3fc40267e79cc84b8bd39139a7..7025d8c279991e36ec5f9eaaebf585254b3604dd 100644 (file)
@@ -233,7 +233,7 @@ static void __dlm_put_mle(struct dlm_master_list_entry *mle)
 
        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&dlm->master_lock);
-       if (!atomic_read(&mle->mle_refs.refcount)) {
+       if (!kref_read(&mle->mle_refs)) {
                /* this may or may not crash, but who cares.
                 * it's a BUG. */
                mlog(ML_ERROR, "bad mle: %p\n", mle);
@@ -1124,9 +1124,9 @@ recheck:
                unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);
 
                /*
-               if (atomic_read(&mle->mle_refs.refcount) < 2)
+               if (kref_read(&mle->mle_refs) < 2)
                        mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
-                       atomic_read(&mle->mle_refs.refcount),
+                       kref_read(&mle->mle_refs),
                        res->lockname.len, res->lockname.name);
                */
                atomic_set(&mle->woken, 0);
@@ -1979,7 +1979,7 @@ ok:
                 * on this mle. */
                spin_lock(&dlm->master_lock);
 
-               rr = atomic_read(&mle->mle_refs.refcount);
+               rr = kref_read(&mle->mle_refs);
                if (mle->inuse > 0) {
                        if (extra_ref && rr < 3)
                                err = 1;
index 1082b2c3014bed10ecafb04e2b17b1585acb0275..63d701cd1e2e70a1d5962c253ca5625d418d7992 100644 (file)
@@ -251,7 +251,7 @@ leave:
                mlog(0, "lock %u:%llu should be gone now! refs=%d\n",
                     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
                     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
-                    atomic_read(&lock->lock_refs.refcount)-1);
+                    kref_read(&lock->lock_refs)-1);
                dlm_lock_put(lock);
        }
        if (actions & DLM_UNLOCK_CALL_AST)
index 51a4213afa2e26a14808634f7d3d2e7bbd804e16..fe12b519d09b53fb17e95969935388ccafaef51a 100644 (file)
@@ -401,8 +401,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
        unsigned long long start_time;
        unsigned long cmin_flt = 0, cmaj_flt = 0;
        unsigned long  min_flt = 0,  maj_flt = 0;
-       cputime_t cutime, cstime, utime, stime;
-       cputime_t cgtime, gtime;
+       u64 cutime, cstime, utime, stime;
+       u64 cgtime, gtime;
        unsigned long rsslim = 0;
        char tcomm[sizeof(task->comm)];
        unsigned long flags;
@@ -497,10 +497,10 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
        seq_put_decimal_ull(m, " ", cmin_flt);
        seq_put_decimal_ull(m, " ", maj_flt);
        seq_put_decimal_ull(m, " ", cmaj_flt);
-       seq_put_decimal_ull(m, " ", cputime_to_clock_t(utime));
-       seq_put_decimal_ull(m, " ", cputime_to_clock_t(stime));
-       seq_put_decimal_ll(m, " ", cputime_to_clock_t(cutime));
-       seq_put_decimal_ll(m, " ", cputime_to_clock_t(cstime));
+       seq_put_decimal_ull(m, " ", nsec_to_clock_t(utime));
+       seq_put_decimal_ull(m, " ", nsec_to_clock_t(stime));
+       seq_put_decimal_ll(m, " ", nsec_to_clock_t(cutime));
+       seq_put_decimal_ll(m, " ", nsec_to_clock_t(cstime));
        seq_put_decimal_ll(m, " ", priority);
        seq_put_decimal_ll(m, " ", nice);
        seq_put_decimal_ll(m, " ", num_threads);
@@ -542,8 +542,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
        seq_put_decimal_ull(m, " ", task->rt_priority);
        seq_put_decimal_ull(m, " ", task->policy);
        seq_put_decimal_ull(m, " ", delayacct_blkio_ticks(task));
-       seq_put_decimal_ull(m, " ", cputime_to_clock_t(gtime));
-       seq_put_decimal_ll(m, " ", cputime_to_clock_t(cgtime));
+       seq_put_decimal_ull(m, " ", nsec_to_clock_t(gtime));
+       seq_put_decimal_ll(m, " ", nsec_to_clock_t(cgtime));
 
        if (mm && permitted) {
                seq_put_decimal_ull(m, " ", mm->start_data);
index 8e7e61b28f31c037961c081d09a7be5f818013ef..b1f7d30e96c27aa5d3e2a9abe0052e123087a448 100644 (file)
@@ -2179,7 +2179,7 @@ static const struct file_operations proc_map_files_operations = {
        .llseek         = generic_file_llseek,
 };
 
-#ifdef CONFIG_CHECKPOINT_RESTORE
+#if defined(CONFIG_CHECKPOINT_RESTORE) && defined(CONFIG_POSIX_TIMERS)
 struct timers_private {
        struct pid *pid;
        struct task_struct *task;
@@ -2936,7 +2936,7 @@ static const struct pid_entry tgid_base_stuff[] = {
        REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations),
        REG("setgroups",  S_IRUGO|S_IWUSR, proc_setgroups_operations),
 #endif
-#ifdef CONFIG_CHECKPOINT_RESTORE
+#if defined(CONFIG_CHECKPOINT_RESTORE) && defined(CONFIG_POSIX_TIMERS)
        REG("timers",     S_IRUGO, proc_timers_operations),
 #endif
        REG("timerslack_ns", S_IRUGO|S_IWUGO, proc_pid_set_timerslack_ns_operations),
@@ -3179,6 +3179,8 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx)
             iter.tgid += 1, iter = next_tgid(ns, iter)) {
                char name[PROC_NUMBUF];
                int len;
+
+               cond_resched();
                if (!has_pid_permissions(ns, iter.task, 2))
                        continue;
 
index a2066e6dee9058ac41b3c51cfcad83f0796e0598..2726536489b19a30394226a27e7a0183690e6b4c 100644 (file)
@@ -173,7 +173,8 @@ u64 stable_page_flags(struct page *page)
        u |= kpf_copy_bit(k, KPF_ACTIVE,        PG_active);
        u |= kpf_copy_bit(k, KPF_RECLAIM,       PG_reclaim);
 
-       u |= kpf_copy_bit(k, KPF_SWAPCACHE,     PG_swapcache);
+       if (PageSwapCache(page))
+               u |= 1 << KPF_SWAPCACHE;
        u |= kpf_copy_bit(k, KPF_SWAPBACKED,    PG_swapbacked);
 
        u |= kpf_copy_bit(k, KPF_UNEVICTABLE,   PG_unevictable);
index d700c42b357263b8e5106a7aed6fe1e301a7892c..e47c3e8c4dfed5c88b4afd41de2a9be05cd7f92d 100644 (file)
@@ -21,9 +21,9 @@
 
 #ifdef arch_idle_time
 
-static cputime64_t get_idle_time(int cpu)
+static u64 get_idle_time(int cpu)
 {
-       cputime64_t idle;
+       u64 idle;
 
        idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
        if (cpu_online(cpu) && !nr_iowait_cpu(cpu))
@@ -31,9 +31,9 @@ static cputime64_t get_idle_time(int cpu)
        return idle;
 }
 
-static cputime64_t get_iowait_time(int cpu)
+static u64 get_iowait_time(int cpu)
 {
-       cputime64_t iowait;
+       u64 iowait;
 
        iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
        if (cpu_online(cpu) && nr_iowait_cpu(cpu))
@@ -45,32 +45,32 @@ static cputime64_t get_iowait_time(int cpu)
 
 static u64 get_idle_time(int cpu)
 {
-       u64 idle, idle_time = -1ULL;
+       u64 idle, idle_usecs = -1ULL;
 
        if (cpu_online(cpu))
-               idle_time = get_cpu_idle_time_us(cpu, NULL);
+               idle_usecs = get_cpu_idle_time_us(cpu, NULL);
 
-       if (idle_time == -1ULL)
+       if (idle_usecs == -1ULL)
                /* !NO_HZ or cpu offline so we can rely on cpustat.idle */
                idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
        else
-               idle = usecs_to_cputime64(idle_time);
+               idle = idle_usecs * NSEC_PER_USEC;
 
        return idle;
 }
 
 static u64 get_iowait_time(int cpu)
 {
-       u64 iowait, iowait_time = -1ULL;
+       u64 iowait, iowait_usecs = -1ULL;
 
        if (cpu_online(cpu))
-               iowait_time = get_cpu_iowait_time_us(cpu, NULL);
+               iowait_usecs = get_cpu_iowait_time_us(cpu, NULL);
 
-       if (iowait_time == -1ULL)
+       if (iowait_usecs == -1ULL)
                /* !NO_HZ or cpu offline so we can rely on cpustat.iowait */
                iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
        else
-               iowait = usecs_to_cputime64(iowait_time);
+               iowait = iowait_usecs * NSEC_PER_USEC;
 
        return iowait;
 }
@@ -115,16 +115,16 @@ static int show_stat(struct seq_file *p, void *v)
        }
        sum += arch_irq_stat();
 
-       seq_put_decimal_ull(p, "cpu  ", cputime64_to_clock_t(user));
-       seq_put_decimal_ull(p, " ", cputime64_to_clock_t(nice));
-       seq_put_decimal_ull(p, " ", cputime64_to_clock_t(system));
-       seq_put_decimal_ull(p, " ", cputime64_to_clock_t(idle));
-       seq_put_decimal_ull(p, " ", cputime64_to_clock_t(iowait));
-       seq_put_decimal_ull(p, " ", cputime64_to_clock_t(irq));
-       seq_put_decimal_ull(p, " ", cputime64_to_clock_t(softirq));
-       seq_put_decimal_ull(p, " ", cputime64_to_clock_t(steal));
-       seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest));
-       seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest_nice));
+       seq_put_decimal_ull(p, "cpu  ", nsec_to_clock_t(user));
+       seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice));
+       seq_put_decimal_ull(p, " ", nsec_to_clock_t(system));
+       seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle));
+       seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait));
+       seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq));
+       seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq));
+       seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal));
+       seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest));
+       seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice));
        seq_putc(p, '\n');
 
        for_each_online_cpu(i) {
@@ -140,16 +140,16 @@ static int show_stat(struct seq_file *p, void *v)
                guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
                guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
                seq_printf(p, "cpu%d", i);
-               seq_put_decimal_ull(p, " ", cputime64_to_clock_t(user));
-               seq_put_decimal_ull(p, " ", cputime64_to_clock_t(nice));
-               seq_put_decimal_ull(p, " ", cputime64_to_clock_t(system));
-               seq_put_decimal_ull(p, " ", cputime64_to_clock_t(idle));
-               seq_put_decimal_ull(p, " ", cputime64_to_clock_t(iowait));
-               seq_put_decimal_ull(p, " ", cputime64_to_clock_t(irq));
-               seq_put_decimal_ull(p, " ", cputime64_to_clock_t(softirq));
-               seq_put_decimal_ull(p, " ", cputime64_to_clock_t(steal));
-               seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest));
-               seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest_nice));
+               seq_put_decimal_ull(p, " ", nsec_to_clock_t(user));
+               seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice));
+               seq_put_decimal_ull(p, " ", nsec_to_clock_t(system));
+               seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle));
+               seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait));
+               seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq));
+               seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq));
+               seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal));
+               seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest));
+               seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice));
                seq_putc(p, '\n');
        }
        seq_put_decimal_ull(p, "intr ", (unsigned long long)sum);
index 33de567c25af4b04a6ba283caaf5e4f6d9b4ecdb..7981c4ffe787a6afb80ae5a3017cef39f744c984 100644 (file)
@@ -5,23 +5,20 @@
 #include <linux/seq_file.h>
 #include <linux/time.h>
 #include <linux/kernel_stat.h>
-#include <linux/cputime.h>
 
 static int uptime_proc_show(struct seq_file *m, void *v)
 {
        struct timespec uptime;
        struct timespec idle;
-       u64 idletime;
        u64 nsec;
        u32 rem;
        int i;
 
-       idletime = 0;
+       nsec = 0;
        for_each_possible_cpu(i)
-               idletime += (__force u64) kcpustat_cpu(i).cpustat[CPUTIME_IDLE];
+               nsec += (__force u64) kcpustat_cpu(i).cpustat[CPUTIME_IDLE];
 
        get_monotonic_boottime(&uptime);
-       nsec = cputime64_to_jiffies64(idletime) * TICK_NSEC;
        idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
        idle.tv_nsec = rem;
        seq_printf(m, "%lu.%02lu %lu.%02lu\n",
index 27c059e1760a8918a0dde19cb884c5ff4563606c..1d887efaaf718c497a233b63286914cd9a498a00 100644 (file)
@@ -280,7 +280,7 @@ static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
                                           1, id, type, PSTORE_TYPE_PMSG, 0);
 
        /* ftrace is last since it may want to dynamically allocate memory. */
-       if (!prz_ok(prz)) {
+       if (!prz_ok(prz) && cxt->fprzs) {
                if (!(cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)) {
                        prz = ramoops_get_next_prz(cxt->fprzs,
                                        &cxt->ftrace_read_cnt, 1, id, type,
index d0f8a38dfafacd8f3d524d1ff69ae8f621eea278..0186fe6d39f3b4d2e77497d4d34a7691204ae9fa 100644 (file)
@@ -74,6 +74,7 @@
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
 #include <linux/uaccess.h>
+#include <linux/major.h>
 #include "internal.h"
 
 static struct kmem_cache *romfs_inode_cachep;
@@ -416,7 +417,22 @@ static void romfs_destroy_inode(struct inode *inode)
 static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
        struct super_block *sb = dentry->d_sb;
-       u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
+       u64 id = 0;
+
+       /* When calling huge_encode_dev(),
+        * use sb->s_bdev->bd_dev when,
+        *   - CONFIG_ROMFS_ON_BLOCK defined
+        * use sb->s_dev when,
+        *   - CONFIG_ROMFS_ON_BLOCK undefined and
+        *   - CONFIG_ROMFS_ON_MTD defined
+        * leave id as 0 when,
+        *   - CONFIG_ROMFS_ON_BLOCK undefined and
+        *   - CONFIG_ROMFS_ON_MTD undefined
+        */
+       if (sb->s_bdev)
+               id = huge_encode_dev(sb->s_bdev->bd_dev);
+       else if (sb->s_dev)
+               id = huge_encode_dev(sb->s_dev);
 
        buf->f_type = ROMFS_MAGIC;
        buf->f_namelen = ROMFS_MAXFN;
@@ -489,6 +505,11 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_flags |= MS_RDONLY | MS_NOATIME;
        sb->s_op = &romfs_super_ops;
 
+#ifdef CONFIG_ROMFS_ON_MTD
+       /* Use same dev ID from the underlying mtdblock device */
+       if (sb->s_mtd)
+               sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, sb->s_mtd->index);
+#endif
        /* read the image superblock and check it */
        rsb = kmalloc(512, GFP_KERNEL);
        if (!rsb)
index 873d83104e79aed14a24c417f211e38ae4038122..4ef78aa8ef61e8d0f091fe4088bc7c28035b5ef1 100644 (file)
@@ -204,6 +204,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
                buf->len = spd->partial[page_nr].len;
                buf->private = spd->partial[page_nr].private;
                buf->ops = spd->ops;
+               buf->flags = 0;
 
                pipe->nrbufs++;
                page_nr++;
index c173cc196175b4e4f72a6553b0faba67cdead250..384fa759a563341b309df47537f9c94da0ee9ebb 100644 (file)
@@ -40,6 +40,7 @@ struct timerfd_ctx {
        short unsigned settime_flags;   /* to show in fdinfo */
        struct rcu_head rcu;
        struct list_head clist;
+       spinlock_t cancel_lock;
        bool might_cancel;
 };
 
@@ -112,7 +113,7 @@ void timerfd_clock_was_set(void)
        rcu_read_unlock();
 }
 
-static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
+static void __timerfd_remove_cancel(struct timerfd_ctx *ctx)
 {
        if (ctx->might_cancel) {
                ctx->might_cancel = false;
@@ -122,6 +123,13 @@ static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
        }
 }
 
+static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
+{
+       spin_lock(&ctx->cancel_lock);
+       __timerfd_remove_cancel(ctx);
+       spin_unlock(&ctx->cancel_lock);
+}
+
 static bool timerfd_canceled(struct timerfd_ctx *ctx)
 {
        if (!ctx->might_cancel || ctx->moffs != KTIME_MAX)
@@ -132,6 +140,7 @@ static bool timerfd_canceled(struct timerfd_ctx *ctx)
 
 static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags)
 {
+       spin_lock(&ctx->cancel_lock);
        if ((ctx->clockid == CLOCK_REALTIME ||
             ctx->clockid == CLOCK_REALTIME_ALARM) &&
            (flags & TFD_TIMER_ABSTIME) && (flags & TFD_TIMER_CANCEL_ON_SET)) {
@@ -141,9 +150,10 @@ static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags)
                        list_add_rcu(&ctx->clist, &cancel_list);
                        spin_unlock(&cancel_lock);
                }
-       } else if (ctx->might_cancel) {
-               timerfd_remove_cancel(ctx);
+       } else {
+               __timerfd_remove_cancel(ctx);
        }
+       spin_unlock(&ctx->cancel_lock);
 }
 
 static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx)
@@ -400,6 +410,7 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
                return -ENOMEM;
 
        init_waitqueue_head(&ctx->wqh);
+       spin_lock_init(&ctx->cancel_lock);
        ctx->clockid = clockid;
 
        if (isalarm(ctx))
index d96e2f30084bcfab552ffe3005af090abfe319c9..43953e03c35682723c6658dfe9b8cceed9de22ef 100644 (file)
@@ -63,6 +63,7 @@ struct userfaultfd_wait_queue {
        struct uffd_msg msg;
        wait_queue_t wq;
        struct userfaultfd_ctx *ctx;
+       bool waken;
 };
 
 struct userfaultfd_wake_range {
@@ -86,6 +87,12 @@ static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode,
        if (len && (start > uwq->msg.arg.pagefault.address ||
                    start + len <= uwq->msg.arg.pagefault.address))
                goto out;
+       WRITE_ONCE(uwq->waken, true);
+       /*
+        * The implicit smp_mb__before_spinlock in try_to_wake_up()
+        * renders uwq->waken visible to other CPUs before the task is
+        * waken.
+        */
        ret = wake_up_state(wq->private, mode);
        if (ret)
                /*
@@ -264,6 +271,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
        struct userfaultfd_wait_queue uwq;
        int ret;
        bool must_wait, return_to_userland;
+       long blocking_state;
 
        BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
@@ -334,10 +342,13 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
        uwq.wq.private = current;
        uwq.msg = userfault_msg(vmf->address, vmf->flags, reason);
        uwq.ctx = ctx;
+       uwq.waken = false;
 
        return_to_userland =
                (vmf->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
                (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
+       blocking_state = return_to_userland ? TASK_INTERRUPTIBLE :
+                        TASK_KILLABLE;
 
        spin_lock(&ctx->fault_pending_wqh.lock);
        /*
@@ -350,8 +361,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
         * following the spin_unlock to happen before the list_add in
         * __add_wait_queue.
         */
-       set_current_state(return_to_userland ? TASK_INTERRUPTIBLE :
-                         TASK_KILLABLE);
+       set_current_state(blocking_state);
        spin_unlock(&ctx->fault_pending_wqh.lock);
 
        must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
@@ -364,6 +374,29 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
                wake_up_poll(&ctx->fd_wqh, POLLIN);
                schedule();
                ret |= VM_FAULT_MAJOR;
+
+               /*
+                * False wakeups can orginate even from rwsem before
+                * up_read() however userfaults will wait either for a
+                * targeted wakeup on the specific uwq waitqueue from
+                * wake_userfault() or for signals or for uffd
+                * release.
+                */
+               while (!READ_ONCE(uwq.waken)) {
+                       /*
+                        * This needs the full smp_store_mb()
+                        * guarantee as the state write must be
+                        * visible to other CPUs before reading
+                        * uwq.waken from other CPUs.
+                        */
+                       set_current_state(blocking_state);
+                       if (READ_ONCE(uwq.waken) ||
+                           READ_ONCE(ctx->released) ||
+                           (return_to_userland ? signal_pending(current) :
+                            fatal_signal_pending(current)))
+                               break;
+                       schedule();
+               }
        }
 
        __set_current_state(TASK_RUNNING);
index d346d42c54d1590250040f0b36c05367287e7bf5..33db69be4832c7bb702f34589d715956d7a871fe 100644 (file)
@@ -39,6 +39,7 @@
 #include "xfs_rmap_btree.h"
 #include "xfs_btree.h"
 #include "xfs_refcount_btree.h"
+#include "xfs_ialloc_btree.h"
 
 /*
  * Per-AG Block Reservations
@@ -200,22 +201,30 @@ __xfs_ag_resv_init(
        struct xfs_mount                *mp = pag->pag_mount;
        struct xfs_ag_resv              *resv;
        int                             error;
+       xfs_extlen_t                    reserved;
 
-       resv = xfs_perag_resv(pag, type);
        if (used > ask)
                ask = used;
-       resv->ar_asked = ask;
-       resv->ar_reserved = resv->ar_orig_reserved = ask - used;
-       mp->m_ag_max_usable -= ask;
+       reserved = ask - used;
 
-       trace_xfs_ag_resv_init(pag, type, ask);
-
-       error = xfs_mod_fdblocks(mp, -(int64_t)resv->ar_reserved, true);
-       if (error)
+       error = xfs_mod_fdblocks(mp, -(int64_t)reserved, true);
+       if (error) {
                trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno,
                                error, _RET_IP_);
+               xfs_warn(mp,
+"Per-AG reservation for AG %u failed.  Filesystem may run out of space.",
+                               pag->pag_agno);
+               return error;
+       }
 
-       return error;
+       mp->m_ag_max_usable -= ask;
+
+       resv = xfs_perag_resv(pag, type);
+       resv->ar_asked = ask;
+       resv->ar_reserved = resv->ar_orig_reserved = reserved;
+
+       trace_xfs_ag_resv_init(pag, type, ask);
+       return 0;
 }
 
 /* Create a per-AG block reservation. */
@@ -223,6 +232,8 @@ int
 xfs_ag_resv_init(
        struct xfs_perag                *pag)
 {
+       struct xfs_mount                *mp = pag->pag_mount;
+       xfs_agnumber_t                  agno = pag->pag_agno;
        xfs_extlen_t                    ask;
        xfs_extlen_t                    used;
        int                             error = 0;
@@ -231,23 +242,45 @@ xfs_ag_resv_init(
        if (pag->pag_meta_resv.ar_asked == 0) {
                ask = used = 0;
 
-               error = xfs_refcountbt_calc_reserves(pag->pag_mount,
-                               pag->pag_agno, &ask, &used);
+               error = xfs_refcountbt_calc_reserves(mp, agno, &ask, &used);
                if (error)
                        goto out;
 
-               error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
-                               ask, used);
+               error = xfs_finobt_calc_reserves(mp, agno, &ask, &used);
                if (error)
                        goto out;
+
+               error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
+                               ask, used);
+               if (error) {
+                       /*
+                        * Because we didn't have per-AG reservations when the
+                        * finobt feature was added we might not be able to
+                        * reserve all needed blocks.  Warn and fall back to the
+                        * old and potentially buggy code in that case, but
+                        * ensure we do have the reservation for the refcountbt.
+                        */
+                       ask = used = 0;
+
+                       mp->m_inotbt_nores = true;
+
+                       error = xfs_refcountbt_calc_reserves(mp, agno, &ask,
+                                       &used);
+                       if (error)
+                               goto out;
+
+                       error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
+                                       ask, used);
+                       if (error)
+                               goto out;
+               }
        }
 
        /* Create the AGFL metadata reservation */
        if (pag->pag_agfl_resv.ar_asked == 0) {
                ask = used = 0;
 
-               error = xfs_rmapbt_calc_reserves(pag->pag_mount, pag->pag_agno,
-                               &ask, &used);
+               error = xfs_rmapbt_calc_reserves(mp, agno, &ask, &used);
                if (error)
                        goto out;
 
@@ -256,9 +289,16 @@ xfs_ag_resv_init(
                        goto out;
        }
 
+#ifdef DEBUG
+       /* need to read in the AGF for the ASSERT below to work */
+       error = xfs_alloc_pagf_init(pag->pag_mount, NULL, pag->pag_agno, 0);
+       if (error)
+               return error;
+
        ASSERT(xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved +
               xfs_perag_resv(pag, XFS_AG_RESV_AGFL)->ar_reserved <=
               pag->pagf_freeblks + pag->pagf_flcount);
+#endif
 out:
        return error;
 }
index af1ecb19121e9e8569c0ee907652405575d882c8..6622d46ddec3890c0356dab26fbff448436584d7 100644 (file)
@@ -131,9 +131,6 @@ xfs_attr_get(
        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;
 
-       if (!xfs_inode_hasattr(ip))
-               return -ENOATTR;
-
        error = xfs_attr_args_init(&args, ip, name, flags);
        if (error)
                return error;
@@ -392,9 +389,6 @@ xfs_attr_remove(
        if (XFS_FORCED_SHUTDOWN(dp->i_mount))
                return -EIO;
 
-       if (!xfs_inode_hasattr(dp))
-               return -ENOATTR;
-
        error = xfs_attr_args_init(&args, dp, name, flags);
        if (error)
                return error;
index 44773c9eb957dd8f6c5904ef9d41c3de880d14be..bfc00de5c6f17a75c7addae26e709ee6cbfde3a9 100644 (file)
@@ -3629,7 +3629,7 @@ xfs_bmap_btalloc(
                align = xfs_get_cowextsz_hint(ap->ip);
        else if (xfs_alloc_is_userdata(ap->datatype))
                align = xfs_get_extsz_hint(ap->ip);
-       if (unlikely(align)) {
+       if (align) {
                error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
                                                align, 0, ap->eof, 0, ap->conv,
                                                &ap->offset, &ap->length);
@@ -3701,7 +3701,7 @@ xfs_bmap_btalloc(
                args.minlen = ap->minlen;
        }
        /* apply extent size hints if obtained earlier */
-       if (unlikely(align)) {
+       if (align) {
                args.prod = align;
                if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
                        args.mod = (xfs_extlen_t)(args.prod - args.mod);
@@ -4514,8 +4514,6 @@ xfs_bmapi_write(
        int                     n;              /* current extent index */
        xfs_fileoff_t           obno;           /* old block number (offset) */
        int                     whichfork;      /* data or attr fork */
-       char                    inhole;         /* current location is hole in file */
-       char                    wasdelay;       /* old extent was delayed */
 
 #ifdef DEBUG
        xfs_fileoff_t           orig_bno;       /* original block number value */
@@ -4603,22 +4601,44 @@ xfs_bmapi_write(
        bma.firstblock = firstblock;
 
        while (bno < end && n < *nmap) {
-               inhole = eof || bma.got.br_startoff > bno;
-               wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
+               bool                    need_alloc = false, wasdelay = false;
 
-               /*
-                * Make sure we only reflink into a hole.
-                */
-               if (flags & XFS_BMAPI_REMAP)
-                       ASSERT(inhole);
-               if (flags & XFS_BMAPI_COWFORK)
-                       ASSERT(!inhole);
+               /* in hole or beyoned EOF? */
+               if (eof || bma.got.br_startoff > bno) {
+                       if (flags & XFS_BMAPI_DELALLOC) {
+                               /*
+                                * For the COW fork we can reasonably get a
+                                * request for converting an extent that races
+                                * with other threads already having converted
+                                * part of it, as there converting COW to
+                                * regular blocks is not protected using the
+                                * IOLOCK.
+                                */
+                               ASSERT(flags & XFS_BMAPI_COWFORK);
+                               if (!(flags & XFS_BMAPI_COWFORK)) {
+                                       error = -EIO;
+                                       goto error0;
+                               }
+
+                               if (eof || bno >= end)
+                                       break;
+                       } else {
+                               need_alloc = true;
+                       }
+               } else {
+                       /*
+                        * Make sure we only reflink into a hole.
+                        */
+                       ASSERT(!(flags & XFS_BMAPI_REMAP));
+                       if (isnullstartblock(bma.got.br_startblock))
+                               wasdelay = true;
+               }
 
                /*
                 * First, deal with the hole before the allocated space
                 * that we found, if any.
                 */
-               if (inhole || wasdelay) {
+               if (need_alloc || wasdelay) {
                        bma.eof = eof;
                        bma.conv = !!(flags & XFS_BMAPI_CONVERT);
                        bma.wasdel = wasdelay;
index cecd094404cc53d821567be873bafdbba98880fa..cdef87db5262bdc2f43b17f5a1e76fb45a107233 100644 (file)
@@ -110,6 +110,9 @@ struct xfs_extent_free_item
 /* Map something in the CoW fork. */
 #define XFS_BMAPI_COWFORK      0x200
 
+/* Only convert delalloc space, don't allocate entirely new extents */
+#define XFS_BMAPI_DELALLOC     0x400
+
 #define XFS_BMAPI_FLAGS \
        { XFS_BMAPI_ENTIRE,     "ENTIRE" }, \
        { XFS_BMAPI_METADATA,   "METADATA" }, \
@@ -120,7 +123,8 @@ struct xfs_extent_free_item
        { XFS_BMAPI_CONVERT,    "CONVERT" }, \
        { XFS_BMAPI_ZERO,       "ZERO" }, \
        { XFS_BMAPI_REMAP,      "REMAP" }, \
-       { XFS_BMAPI_COWFORK,    "COWFORK" }
+       { XFS_BMAPI_COWFORK,    "COWFORK" }, \
+       { XFS_BMAPI_DELALLOC,   "DELALLOC" }
 
 
 static inline int xfs_bmapi_aflag(int w)
index 0fd086d03d4156cde3fc2851533d910cee046c1c..7c471881c9a67482bd6bf88df59bd6f2e63a57a7 100644 (file)
@@ -82,11 +82,12 @@ xfs_finobt_set_root(
 }
 
 STATIC int
-xfs_inobt_alloc_block(
+__xfs_inobt_alloc_block(
        struct xfs_btree_cur    *cur,
        union xfs_btree_ptr     *start,
        union xfs_btree_ptr     *new,
-       int                     *stat)
+       int                     *stat,
+       enum xfs_ag_resv_type   resv)
 {
        xfs_alloc_arg_t         args;           /* block allocation args */
        int                     error;          /* error return value */
@@ -103,6 +104,7 @@ xfs_inobt_alloc_block(
        args.maxlen = 1;
        args.prod = 1;
        args.type = XFS_ALLOCTYPE_NEAR_BNO;
+       args.resv = resv;
 
        error = xfs_alloc_vextent(&args);
        if (error) {
@@ -122,6 +124,27 @@ xfs_inobt_alloc_block(
        return 0;
 }
 
+STATIC int
+xfs_inobt_alloc_block(
+       struct xfs_btree_cur    *cur,
+       union xfs_btree_ptr     *start,
+       union xfs_btree_ptr     *new,
+       int                     *stat)
+{
+       return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE);
+}
+
+STATIC int
+xfs_finobt_alloc_block(
+       struct xfs_btree_cur    *cur,
+       union xfs_btree_ptr     *start,
+       union xfs_btree_ptr     *new,
+       int                     *stat)
+{
+       return __xfs_inobt_alloc_block(cur, start, new, stat,
+                       XFS_AG_RESV_METADATA);
+}
+
 STATIC int
 xfs_inobt_free_block(
        struct xfs_btree_cur    *cur,
@@ -328,7 +351,7 @@ static const struct xfs_btree_ops xfs_finobt_ops = {
 
        .dup_cursor             = xfs_inobt_dup_cursor,
        .set_root               = xfs_finobt_set_root,
-       .alloc_block            = xfs_inobt_alloc_block,
+       .alloc_block            = xfs_finobt_alloc_block,
        .free_block             = xfs_inobt_free_block,
        .get_minrecs            = xfs_inobt_get_minrecs,
        .get_maxrecs            = xfs_inobt_get_maxrecs,
@@ -480,3 +503,64 @@ xfs_inobt_rec_check_count(
        return 0;
 }
 #endif /* DEBUG */
+
+static xfs_extlen_t
+xfs_inobt_max_size(
+       struct xfs_mount        *mp)
+{
+       /* Bail out if we're uninitialized, which can happen in mkfs. */
+       if (mp->m_inobt_mxr[0] == 0)
+               return 0;
+
+       return xfs_btree_calc_size(mp, mp->m_inobt_mnr,
+               (uint64_t)mp->m_sb.sb_agblocks * mp->m_sb.sb_inopblock /
+                               XFS_INODES_PER_CHUNK);
+}
+
+static int
+xfs_inobt_count_blocks(
+       struct xfs_mount        *mp,
+       xfs_agnumber_t          agno,
+       xfs_btnum_t             btnum,
+       xfs_extlen_t            *tree_blocks)
+{
+       struct xfs_buf          *agbp;
+       struct xfs_btree_cur    *cur;
+       int                     error;
+
+       error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
+       if (error)
+               return error;
+
+       cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno, btnum);
+       error = xfs_btree_count_blocks(cur, tree_blocks);
+       xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+       xfs_buf_relse(agbp);
+
+       return error;
+}
+
+/*
+ * Figure out how many blocks to reserve and how many are used by this btree.
+ */
+int
+xfs_finobt_calc_reserves(
+       struct xfs_mount        *mp,
+       xfs_agnumber_t          agno,
+       xfs_extlen_t            *ask,
+       xfs_extlen_t            *used)
+{
+       xfs_extlen_t            tree_len = 0;
+       int                     error;
+
+       if (!xfs_sb_version_hasfinobt(&mp->m_sb))
+               return 0;
+
+       error = xfs_inobt_count_blocks(mp, agno, XFS_BTNUM_FINO, &tree_len);
+       if (error)
+               return error;
+
+       *ask += xfs_inobt_max_size(mp);
+       *used += tree_len;
+       return 0;
+}
index bd88453217ceca0466fbd07e409707ab7e9354b3..aa81e2e63f3f95da8434798e8b2bf6501491eac3 100644 (file)
@@ -72,4 +72,7 @@ int xfs_inobt_rec_check_count(struct xfs_mount *,
 #define xfs_inobt_rec_check_count(mp, rec)     0
 #endif /* DEBUG */
 
+int xfs_finobt_calc_reserves(struct xfs_mount *mp, xfs_agnumber_t agno,
+               xfs_extlen_t *ask, xfs_extlen_t *used);
+
 #endif /* __XFS_IALLOC_BTREE_H__ */
index 2580262e4ea00c3dc728b041dca125f4f7078373..584ec896a53374f81da05906d517fba717686504 100644 (file)
@@ -242,7 +242,7 @@ xfs_mount_validate_sb(
            sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG                    ||
            sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG                    ||
            sbp->sb_blocksize != (1 << sbp->sb_blocklog)                ||
-           sbp->sb_dirblklog > XFS_MAX_BLOCKSIZE_LOG                   ||
+           sbp->sb_dirblklog + sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
            sbp->sb_inodesize < XFS_DINODE_MIN_SIZE                     ||
            sbp->sb_inodesize > XFS_DINODE_MAX_SIZE                     ||
            sbp->sb_inodelog < XFS_DINODE_MIN_LOG                       ||
index b9abce524c33b1af8581e8d52d032685fb767747..c1417919ab0a67eb61e97b77746d71c90c392776 100644 (file)
@@ -528,7 +528,6 @@ xfs_getbmap(
        xfs_bmbt_irec_t         *map;           /* buffer for user's data */
        xfs_mount_t             *mp;            /* file system mount point */
        int                     nex;            /* # of user extents can do */
-       int                     nexleft;        /* # of user extents left */
        int                     subnex;         /* # of bmapi's can do */
        int                     nmap;           /* number of map entries */
        struct getbmapx         *out;           /* output structure */
@@ -686,10 +685,8 @@ xfs_getbmap(
                goto out_free_map;
        }
 
-       nexleft = nex;
-
        do {
-               nmap = (nexleft > subnex) ? subnex : nexleft;
+               nmap = (nex> subnex) ? subnex : nex;
                error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
                                       XFS_BB_TO_FSB(mp, bmv->bmv_length),
                                       map, &nmap, bmapi_flags);
@@ -697,8 +694,8 @@ xfs_getbmap(
                        goto out_free_map;
                ASSERT(nmap <= subnex);
 
-               for (i = 0; i < nmap && nexleft && bmv->bmv_length &&
-                               cur_ext < bmv->bmv_count; i++) {
+               for (i = 0; i < nmap && bmv->bmv_length &&
+                               cur_ext < bmv->bmv_count - 1; i++) {
                        out[cur_ext].bmv_oflags = 0;
                        if (map[i].br_state == XFS_EXT_UNWRITTEN)
                                out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
@@ -760,16 +757,27 @@ xfs_getbmap(
                                continue;
                        }
 
+                       /*
+                        * In order to report shared extents accurately,
+                        * we report each distinct shared/unshared part
+                        * of a single bmbt record using multiple bmap
+                        * extents.  To make that happen, we iterate the
+                        * same map array item multiple times, each
+                        * time trimming out the subextent that we just
+                        * reported.
+                        *
+                        * Because of this, we must check the out array
+                        * index (cur_ext) directly against bmv_count-1
+                        * to avoid overflows.
+                        */
                        if (inject_map.br_startblock != NULLFSBLOCK) {
                                map[i] = inject_map;
                                i--;
-                       } else
-                               nexleft--;
+                       }
                        bmv->bmv_entries++;
                        cur_ext++;
                }
-       } while (nmap && nexleft && bmv->bmv_length &&
-                cur_ext < bmv->bmv_count);
+       } while (nmap && bmv->bmv_length && cur_ext < bmv->bmv_count - 1);
 
  out_free_map:
        kmem_free(map);
index 7f0a01f7b592d20932649d1f8a705a836d86ca02..ac3b4db519df8ee5c03fc759295028d6316f474e 100644 (file)
@@ -422,6 +422,7 @@ retry:
 out_free_pages:
        for (i = 0; i < bp->b_page_count; i++)
                __free_page(bp->b_pages[i]);
+       bp->b_flags &= ~_XBF_PAGES;
        return error;
 }
 
index b9557795eb74d4249b34ffd97db6faf0354f8be0..de32f0fe47c8e00d3163b808ebec9c42edefb70b 100644 (file)
@@ -1792,22 +1792,23 @@ xfs_inactive_ifree(
        int                     error;
 
        /*
-        * The ifree transaction might need to allocate blocks for record
-        * insertion to the finobt. We don't want to fail here at ENOSPC, so
-        * allow ifree to dip into the reserved block pool if necessary.
-        *
-        * Freeing large sets of inodes generally means freeing inode chunks,
-        * directory and file data blocks, so this should be relatively safe.
-        * Only under severe circumstances should it be possible to free enough
-        * inodes to exhaust the reserve block pool via finobt expansion while
-        * at the same time not creating free space in the filesystem.
+        * We try to use a per-AG reservation for any block needed by the finobt
+        * tree, but as the finobt feature predates the per-AG reservation
+        * support a degraded file system might not have enough space for the
+        * reservation at mount time.  In that case try to dip into the reserved
+        * pool and pray.
         *
         * Send a warning if the reservation does happen to fail, as the inode
         * now remains allocated and sits on the unlinked list until the fs is
         * repaired.
         */
-       error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
-                       XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
+       if (unlikely(mp->m_inotbt_nores)) {
+               error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
+                               XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
+                               &tp);
+       } else {
+               error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
+       }
        if (error) {
                if (error == -ENOSPC) {
                        xfs_warn_ratelimited(mp,
index 0d147428971e0c21c88aa90e2628001cf582971c..1aa3abd67b36670f60b9daf7672c243307c77438 100644 (file)
@@ -681,7 +681,7 @@ xfs_iomap_write_allocate(
        xfs_trans_t     *tp;
        int             nimaps;
        int             error = 0;
-       int             flags = 0;
+       int             flags = XFS_BMAPI_DELALLOC;
        int             nres;
 
        if (whichfork == XFS_COW_FORK)
index 84f785218907434276d12e6926d8e85974a14477..7f351f706b7a2938292680801c9fa0bf51458c1c 100644 (file)
@@ -140,6 +140,7 @@ typedef struct xfs_mount {
        int                     m_fixedfsid[2]; /* unchanged for life of FS */
        uint                    m_dmevmask;     /* DMI events for this FS */
        __uint64_t              m_flags;        /* global mount flags */
+       bool                    m_inotbt_nores; /* no per-AG finobt resv. */
        int                     m_ialloc_inos;  /* inodes in inode allocation */
        int                     m_ialloc_blks;  /* blocks in inode allocation */
        int                     m_ialloc_min_blks;/* min blocks in sparse inode
index 45e50ea90769f15d80e3da57e9a6d1721d12a21a..b669b123287bb115e561a6ea1c0be8ae4db359db 100644 (file)
@@ -1177,7 +1177,8 @@ xfs_qm_dqusage_adjust(
         * the case in all other instances. It's OK that we do this because
         * quotacheck is done only at mount time.
         */
-       error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
+       error = xfs_iget(mp, NULL, ino, XFS_IGET_DONTCACHE, XFS_ILOCK_EXCL,
+                        &ip);
        if (error) {
                *res = BULKSTAT_RV_NOTHING;
                return error;
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h
deleted file mode 100644 (file)
index 5196943..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _ASM_GENERIC_CPUTIME_H
-#define _ASM_GENERIC_CPUTIME_H
-
-#include <linux/time.h>
-#include <linux/jiffies.h>
-
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
-# include <asm-generic/cputime_jiffies.h>
-#endif
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-# include <asm-generic/cputime_nsecs.h>
-#endif
-
-#endif
diff --git a/include/asm-generic/cputime_jiffies.h b/include/asm-generic/cputime_jiffies.h
deleted file mode 100644 (file)
index 6bb8cd4..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-#ifndef _ASM_GENERIC_CPUTIME_JIFFIES_H
-#define _ASM_GENERIC_CPUTIME_JIFFIES_H
-
-typedef unsigned long __nocast cputime_t;
-
-#define cmpxchg_cputime(ptr, old, new) cmpxchg(ptr, old, new)
-
-#define cputime_one_jiffy              jiffies_to_cputime(1)
-#define cputime_to_jiffies(__ct)       (__force unsigned long)(__ct)
-#define jiffies_to_cputime(__hz)       (__force cputime_t)(__hz)
-
-typedef u64 __nocast cputime64_t;
-
-#define cputime64_to_jiffies64(__ct)   (__force u64)(__ct)
-#define jiffies64_to_cputime64(__jif)  (__force cputime64_t)(__jif)
-
-
-/*
- * Convert nanoseconds <-> cputime
- */
-#define cputime_to_nsecs(__ct)         \
-       jiffies_to_nsecs(cputime_to_jiffies(__ct))
-#define nsecs_to_cputime64(__nsec)     \
-       jiffies64_to_cputime64(nsecs_to_jiffies64(__nsec))
-#define nsecs_to_cputime(__nsec)       \
-       jiffies_to_cputime(nsecs_to_jiffies(__nsec))
-
-
-/*
- * Convert cputime to microseconds and back.
- */
-#define cputime_to_usecs(__ct)         \
-       jiffies_to_usecs(cputime_to_jiffies(__ct))
-#define usecs_to_cputime(__usec)       \
-       jiffies_to_cputime(usecs_to_jiffies(__usec))
-#define usecs_to_cputime64(__usec)     \
-       jiffies64_to_cputime64(nsecs_to_jiffies64((__usec) * 1000))
-
-/*
- * Convert cputime to seconds and back.
- */
-#define cputime_to_secs(jif)           (cputime_to_jiffies(jif) / HZ)
-#define secs_to_cputime(sec)           jiffies_to_cputime((sec) * HZ)
-
-/*
- * Convert cputime to timespec and back.
- */
-#define timespec_to_cputime(__val)     \
-       jiffies_to_cputime(timespec_to_jiffies(__val))
-#define cputime_to_timespec(__ct,__val)        \
-       jiffies_to_timespec(cputime_to_jiffies(__ct),__val)
-
-/*
- * Convert cputime to timeval and back.
- */
-#define timeval_to_cputime(__val)      \
-       jiffies_to_cputime(timeval_to_jiffies(__val))
-#define cputime_to_timeval(__ct,__val) \
-       jiffies_to_timeval(cputime_to_jiffies(__ct),__val)
-
-/*
- * Convert cputime to clock and back.
- */
-#define cputime_to_clock_t(__ct)       \
-       jiffies_to_clock_t(cputime_to_jiffies(__ct))
-#define clock_t_to_cputime(__x)                \
-       jiffies_to_cputime(clock_t_to_jiffies(__x))
-
-/*
- * Convert cputime64 to clock.
- */
-#define cputime64_to_clock_t(__ct)     \
-       jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct))
-
-#endif
diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
deleted file mode 100644 (file)
index 4e3b18e..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Definitions for measuring cputime in nsecs resolution.
- *
- * Based on <arch/ia64/include/asm/cputime.h>
- *
- * Copyright (C) 2007 FUJITSU LIMITED
- * Copyright (C) 2007 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-#ifndef _ASM_GENERIC_CPUTIME_NSECS_H
-#define _ASM_GENERIC_CPUTIME_NSECS_H
-
-#include <linux/math64.h>
-
-typedef u64 __nocast cputime_t;
-typedef u64 __nocast cputime64_t;
-
-#define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new)
-
-#define cputime_one_jiffy              jiffies_to_cputime(1)
-
-#define cputime_div(__ct, divisor)  div_u64((__force u64)__ct, divisor)
-#define cputime_div_rem(__ct, divisor, remainder) \
-       div_u64_rem((__force u64)__ct, divisor, remainder);
-
-/*
- * Convert cputime <-> jiffies (HZ)
- */
-#define cputime_to_jiffies(__ct)       \
-       cputime_div(__ct, NSEC_PER_SEC / HZ)
-#define jiffies_to_cputime(__jif)      \
-       (__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ))
-#define cputime64_to_jiffies64(__ct)   \
-       cputime_div(__ct, NSEC_PER_SEC / HZ)
-#define jiffies64_to_cputime64(__jif)  \
-       (__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ))
-
-
-/*
- * Convert cputime <-> nanoseconds
- */
-#define cputime_to_nsecs(__ct)         \
-       (__force u64)(__ct)
-#define nsecs_to_cputime(__nsecs)      \
-       (__force cputime_t)(__nsecs)
-#define nsecs_to_cputime64(__nsecs)    \
-       (__force cputime64_t)(__nsecs)
-
-
-/*
- * Convert cputime <-> microseconds
- */
-#define cputime_to_usecs(__ct)         \
-       cputime_div(__ct, NSEC_PER_USEC)
-#define usecs_to_cputime(__usecs)      \
-       (__force cputime_t)((__usecs) * NSEC_PER_USEC)
-#define usecs_to_cputime64(__usecs)    \
-       (__force cputime64_t)((__usecs) * NSEC_PER_USEC)
-
-/*
- * Convert cputime <-> seconds
- */
-#define cputime_to_secs(__ct)          \
-       cputime_div(__ct, NSEC_PER_SEC)
-#define secs_to_cputime(__secs)                \
-       (__force cputime_t)((__secs) * NSEC_PER_SEC)
-
-/*
- * Convert cputime <-> timespec (nsec)
- */
-static inline cputime_t timespec_to_cputime(const struct timespec *val)
-{
-       u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
-       return (__force cputime_t) ret;
-}
-static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
-{
-       u32 rem;
-
-       val->tv_sec = cputime_div_rem(ct, NSEC_PER_SEC, &rem);
-       val->tv_nsec = rem;
-}
-
-/*
- * Convert cputime <-> timeval (msec)
- */
-static inline cputime_t timeval_to_cputime(const struct timeval *val)
-{
-       u64 ret = (u64)val->tv_sec * NSEC_PER_SEC +
-                       val->tv_usec * NSEC_PER_USEC;
-       return (__force cputime_t) ret;
-}
-static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
-{
-       u32 rem;
-
-       val->tv_sec = cputime_div_rem(ct, NSEC_PER_SEC, &rem);
-       val->tv_usec = rem / NSEC_PER_USEC;
-}
-
-/*
- * Convert cputime <-> clock (USER_HZ)
- */
-#define cputime_to_clock_t(__ct)       \
-       cputime_div(__ct, (NSEC_PER_SEC / USER_HZ))
-#define clock_t_to_cputime(__x)                \
-       (__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ))
-
-/*
- * Convert cputime64 to clock.
- */
-#define cputime64_to_clock_t(__ct)     \
-       cputime_to_clock_t((__force cputime_t)__ct)
-
-#endif
index 63554e9f6e0c68595943e27d2734ad4fa8271007..719db1968d8177a91028fd5e8bd6068f5d71c491 100644 (file)
@@ -9,18 +9,15 @@
 #ifndef KSYM_ALIGN
 #define KSYM_ALIGN 8
 #endif
-#ifndef KCRC_ALIGN
-#define KCRC_ALIGN 8
-#endif
 #else
 #define __put .long
 #ifndef KSYM_ALIGN
 #define KSYM_ALIGN 4
 #endif
+#endif
 #ifndef KCRC_ALIGN
 #define KCRC_ALIGN 4
 #endif
-#endif
 
 #ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
 #define KSYM(name) _##name
@@ -52,7 +49,11 @@ KSYM(__kstrtab_\name):
        .section ___kcrctab\sec+\name,"a"
        .balign KCRC_ALIGN
 KSYM(__kcrctab_\name):
-       __put KSYM(__crc_\name)
+#if defined(CONFIG_MODULE_REL_CRCS)
+       .long KSYM(__crc_\name) - .
+#else
+       .long KSYM(__crc_\name)
+#endif
        .weak KSYM(__crc_\name)
        .previous
 #endif
index 5be122e3d32605ad9e0f809b23ba5349d5118de4..6c6a2141f271cba9fd7fe56636718dc9ad04b550 100644 (file)
@@ -33,7 +33,7 @@
  */
 static inline void __down_read(struct rw_semaphore *sem)
 {
-       if (unlikely(atomic_long_inc_return_acquire((atomic_long_t *)&sem->count) <= 0))
+       if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0))
                rwsem_down_read_failed(sem);
 }
 
@@ -58,7 +58,7 @@ static inline void __down_write(struct rw_semaphore *sem)
        long tmp;
 
        tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
-                                    (atomic_long_t *)&sem->count);
+                                            &sem->count);
        if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
                rwsem_down_write_failed(sem);
 }
@@ -68,7 +68,7 @@ static inline int __down_write_killable(struct rw_semaphore *sem)
        long tmp;
 
        tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
-                                    (atomic_long_t *)&sem->count);
+                                            &sem->count);
        if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
                if (IS_ERR(rwsem_down_write_failed_killable(sem)))
                        return -EINTR;
@@ -91,7 +91,7 @@ static inline void __up_read(struct rw_semaphore *sem)
 {
        long tmp;
 
-       tmp = atomic_long_dec_return_release((atomic_long_t *)&sem->count);
+       tmp = atomic_long_dec_return_release(&sem->count);
        if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
                rwsem_wake(sem);
 }
@@ -102,7 +102,7 @@ static inline void __up_read(struct rw_semaphore *sem)
 static inline void __up_write(struct rw_semaphore *sem)
 {
        if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
-                                (atomic_long_t *)&sem->count) < 0))
+                                                   &sem->count) < 0))
                rwsem_wake(sem);
 }
 
@@ -120,8 +120,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
         * read-locked region is ok to be re-ordered into the
         * write side. As such, rely on RELEASE semantics.
         */
-       tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS,
-                                    (atomic_long_t *)&sem->count);
+       tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count);
        if (tmp < 0)
                rwsem_downgrade_wake(sem);
 }
index 192016e2b5183c7a22fd13fd21372de5862f44d8..9c4ee144b5f6b799baf92a9722cddcaee3eb2b4f 100644 (file)
@@ -517,6 +517,7 @@ struct drm_device {
        struct drm_minor *control;              /**< Control node */
        struct drm_minor *primary;              /**< Primary node */
        struct drm_minor *render;               /**< Render node */
+       bool registered;
 
        /* currently active master for this device. Protected by master_mutex */
        struct drm_master *master;
index d6d241f63b9f8e4c3a9310008f17b9439f7fa2ed..56814e8ae7ea91a4c9aeed59a2c6f9d2263fb2e4 100644 (file)
@@ -144,7 +144,7 @@ struct __drm_crtcs_state {
        struct drm_crtc *ptr;
        struct drm_crtc_state *state;
        struct drm_crtc_commit *commit;
-       s64 __user *out_fence_ptr;
+       s32 __user *out_fence_ptr;
 };
 
 struct __drm_connnectors_state {
index a9b95246e26efcf3d44cd5afc85f7031f0fa77fe..045a97cbeba24f44eb1b1d5582b4145801fcafdd 100644 (file)
@@ -381,6 +381,8 @@ struct drm_connector_funcs {
         * core drm connector interfaces. Everything added from this callback
         * should be unregistered in the early_unregister callback.
         *
+        * This is called while holding drm_connector->mutex.
+        *
         * Returns:
         *
         * 0 on success, or a negative error code on failure.
@@ -395,6 +397,8 @@ struct drm_connector_funcs {
         * late_register(). It is called from drm_connector_unregister(),
         * early in the driver unload sequence to disable userspace access
         * before data structures are torndown.
+        *
+        * This is called while holding drm_connector->mutex.
         */
        void (*early_unregister)(struct drm_connector *connector);
 
@@ -559,7 +563,6 @@ struct drm_cmdline_mode {
  * @interlace_allowed: can this connector handle interlaced modes?
  * @doublescan_allowed: can this connector handle doublescan?
  * @stereo_allowed: can this connector handle stereo modes?
- * @registered: is this connector exposed (registered) with userspace?
  * @modes: modes available on this connector (from fill_modes() + user)
  * @status: one of the drm_connector_status enums (connected, not, or unknown)
  * @probed_modes: list of modes derived directly from the display
@@ -607,6 +610,13 @@ struct drm_connector {
 
        char *name;
 
+       /**
+        * @mutex: Lock for general connector state, but currently only protects
+        * @registered. Most of the connector state is still protected by the
+        * mutex in &drm_mode_config.
+        */
+       struct mutex mutex;
+
        /**
         * @index: Compacted connector index, which matches the position inside
         * the mode_config.list for drivers not supporting hot-add/removing. Can
@@ -620,6 +630,10 @@ struct drm_connector {
        bool interlace_allowed;
        bool doublescan_allowed;
        bool stereo_allowed;
+       /**
+        * @registered: Is this connector exposed (registered) with userspace?
+        * Protected by @mutex.
+        */
        bool registered;
        struct list_head modes; /* list of modes on this connector */
 
index 1ddfa2928802249f8c5abefb80ed71577bb04c12..a232e7f0c8695090f7a86805aaef73bb024ef8df 100644 (file)
@@ -247,7 +247,7 @@ static inline void drm_framebuffer_unreference(struct drm_framebuffer *fb)
  */
 static inline uint32_t drm_framebuffer_read_refcount(struct drm_framebuffer *fb)
 {
-       return atomic_read(&fb->base.refcount.refcount);
+       return kref_read(&fb->base.refcount);
 }
 
 /**
index bf9991b20611a666d05445152d587446c5a834f5..137432386310aa8a9449d28f655a60b78bdd654a 100644 (file)
@@ -488,7 +488,7 @@ struct drm_mode_config {
        /**
         * @prop_out_fence_ptr: Sync File fd pointer representing the
         * outgoing fences for a CRTC. Userspace should provide a pointer to a
-        * value of type s64, and then cast that pointer to u64.
+        * value of type s32, and then cast that pointer to u64.
         */
        struct drm_property *prop_out_fence_ptr;
        /**
index 652e45be97c8a4021a3ee2219a04bf2020dce910..9a465314572c6cd5d43ed3ef755e157fabfb7169 100644 (file)
@@ -332,19 +332,6 @@ extern int ttm_bo_validate(struct ttm_buffer_object *bo,
  */
 extern void ttm_bo_unref(struct ttm_buffer_object **bo);
 
-
-/**
- * ttm_bo_list_ref_sub
- *
- * @bo: The buffer object.
- * @count: The number of references with which to decrease @bo::list_kref;
- * @never_free: The refcount should not reach zero with this operation.
- *
- * Release @count lru list references to this buffer object.
- */
-extern void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
-                               bool never_free);
-
 /**
  * ttm_bo_add_to_lru
  *
@@ -367,7 +354,7 @@ extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
  * and is usually called just immediately after the bo has been reserved to
  * avoid recursive reservation from lru lists.
  */
-extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
+extern void ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
 
 /**
  * ttm_bo_move_to_lru_tail
index cdbdb40eb5bd3a83d1a2ea594c0de3efa9226c02..feecf33a1212279d19eee8d0dfd266be3ed99351 100644 (file)
@@ -878,7 +878,7 @@ static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
 {
        int ret;
 
-       WARN_ON(!atomic_read(&bo->kref.refcount));
+       WARN_ON(!kref_read(&bo->kref));
 
        ret = __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
        if (likely(ret == 0))
@@ -903,7 +903,7 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
 {
        int ret = 0;
 
-       WARN_ON(!atomic_read(&bo->kref.refcount));
+       WARN_ON(!kref_read(&bo->kref));
 
        if (interruptible)
                ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
diff --git a/include/dt-bindings/thermal/lm90.h b/include/dt-bindings/thermal/lm90.h
new file mode 100644 (file)
index 0000000..8c2e309
--- /dev/null
@@ -0,0 +1,12 @@
+/*
+ * This header provides constants for the LM90 thermal bindings.
+ */
+
+#ifndef _DT_BINDINGS_THERMAL_LM90_H_
+#define _DT_BINDINGS_THERMAL_LM90_H_
+
+#define LM90_LOCAL_TEMPERATURE 0
+#define LM90_REMOTE_TEMPERATURE 1
+#define LM90_REMOTE2_TEMPERATURE 2
+
+#endif
index 5b36974ed60a55c748758c9327addaebf73912a4..8e577c2cb0ced43e058201cd32ebdf9d2750e9b2 100644 (file)
@@ -1153,4 +1153,14 @@ int parse_spcr(bool earlycon);
 static inline int parse_spcr(bool earlycon) { return 0; }
 #endif
 
+#if IS_ENABLED(CONFIG_ACPI_GENERIC_GSI)
+int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res);
+#else
+static inline
+int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res)
+{
+       return -EINVAL;
+}
+#endif
+
 #endif /*_LINUX_ACPI_H*/
index b20e3d56253f6ef0660fd5a35c4b2f7932a6f8ad..2f1c690a3e6622a5b79fa2486854574788083c24 100644 (file)
@@ -593,9 +593,6 @@ struct bcma_sflash {
        u32 blocksize;
        u16 numblocks;
        u32 size;
-
-       struct mtd_info *mtd;
-       void *priv;
 };
 #endif
 
index 92bc89ae7e20733c28f6130cf00314a42aa0c580..c970a25d2a49b34325aa6ae3e386b9ed93743c14 100644 (file)
@@ -21,20 +21,19 @@ struct cgroup_bpf {
         */
        struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE];
        struct bpf_prog __rcu *effective[MAX_BPF_ATTACH_TYPE];
+       bool disallow_override[MAX_BPF_ATTACH_TYPE];
 };
 
 void cgroup_bpf_put(struct cgroup *cgrp);
 void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent);
 
-void __cgroup_bpf_update(struct cgroup *cgrp,
-                        struct cgroup *parent,
-                        struct bpf_prog *prog,
-                        enum bpf_attach_type type);
+int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
+                       struct bpf_prog *prog, enum bpf_attach_type type,
+                       bool overridable);
 
 /* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */
-void cgroup_bpf_update(struct cgroup *cgrp,
-                      struct bpf_prog *prog,
-                      enum bpf_attach_type type);
+int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
+                     enum bpf_attach_type type, bool overridable);
 
 int __cgroup_bpf_run_filter_skb(struct sock *sk,
                                struct sk_buff *skb,
index 05cf951df3fedd12b742b9478af13aeace86749c..3ed1f3b1d594b67f1fab991492e5d73e10b81606 100644 (file)
@@ -247,6 +247,8 @@ struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
 void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
 int bpf_map_precharge_memlock(u32 pages);
+void *bpf_map_area_alloc(size_t size);
+void bpf_map_area_free(void *base);
 
 extern int sysctl_unprivileged_bpf_disabled;
 
index d67ab83823adc81234b0380feca85877996817db..79591c3660cc1dc61e1e3f4b501c470161c41eb9 100644 (file)
@@ -243,12 +243,10 @@ static inline int block_page_mkwrite_return(int err)
 {
        if (err == 0)
                return VM_FAULT_LOCKED;
-       if (err == -EFAULT)
+       if (err == -EFAULT || err == -EAGAIN)
                return VM_FAULT_NOPAGE;
        if (err == -ENOMEM)
                return VM_FAULT_OOM;
-       if (err == -EAGAIN)
-               return VM_FAULT_RETRY;
        /* -ENOSPC, -EDQUOT, -EIO ... */
        return VM_FAULT_SIGBUS;
 }
index a0875001b13c84ad70a9b2909654e9ffb6824c58..df08a41d5be5f26cfa4cdc74935f5eae7fa51385 100644 (file)
@@ -45,10 +45,9 @@ struct can_proto {
 extern int  can_proto_register(const struct can_proto *cp);
 extern void can_proto_unregister(const struct can_proto *cp);
 
-extern int  can_rx_register(struct net_device *dev, canid_t can_id,
-                           canid_t mask,
-                           void (*func)(struct sk_buff *, void *),
-                           void *data, char *ident);
+int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
+                   void (*func)(struct sk_buff *, void *),
+                   void *data, char *ident, struct sock *sk);
 
 extern void can_rx_unregister(struct net_device *dev, canid_t can_id,
                              canid_t mask,
index 0d442e34c34979a503030214007223742b3bc822..5d3053c34fb3d5c365ad1f5a44e39a03fb6926c8 100644 (file)
@@ -224,4 +224,13 @@ static inline void tick_setup_hrtimer_broadcast(void) { }
 
 #endif /* !CONFIG_GENERIC_CLOCKEVENTS */
 
+#define CLOCKEVENT_OF_DECLARE(name, compat, fn) \
+       OF_DECLARE_1_RET(clkevt, name, compat, fn)
+
+#ifdef CONFIG_CLKEVT_PROBE
+extern int clockevent_probe(void);
+#else
+static inline int clockevent_probe(void) { return 0; }
+#endif
+
 #endif /* _LINUX_CLOCKCHIPS_H */
index e315d04a2fd91ceb94948949d3affd8e12d25748..cfc75848a35d2c58ed85720fcd344fdc2c6f04ad 100644 (file)
@@ -62,6 +62,8 @@ struct module;
  * @archdata:          arch-specific data
  * @suspend:           suspend function for the clocksource, if necessary
  * @resume:            resume function for the clocksource, if necessary
+ * @mark_unstable:     Optional function to inform the clocksource driver that
+ *                     the watchdog marked the clocksource unstable
  * @owner:             module reference, must be set by clocksource in modules
  *
  * Note: This struct is not used in hotpathes of the timekeeping code
@@ -93,6 +95,7 @@ struct clocksource {
        unsigned long flags;
        void (*suspend)(struct clocksource *cs);
        void (*resume)(struct clocksource *cs);
+       void (*mark_unstable)(struct clocksource *cs);
 
        /* private: */
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
index 63609398ef9f21bca890619d29be4329df4c079e..9e40be522793eb3d8f533cb3e2d7cccc0b3ceabf 100644 (file)
@@ -731,7 +731,25 @@ asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32,
 static inline bool in_compat_syscall(void) { return is_compat_task(); }
 #endif
 
-#else
+/**
+ * ns_to_compat_timeval - Compat version of ns_to_timeval
+ * @nsec:      the nanoseconds value to be converted
+ *
+ * Returns the compat_timeval representation of the nsec parameter.
+ */
+static inline struct compat_timeval ns_to_compat_timeval(s64 nsec)
+{
+       struct timeval tv;
+       struct compat_timeval ctv;
+
+       tv = ns_to_timeval(nsec);
+       ctv.tv_sec = tv.tv_sec;
+       ctv.tv_usec = tv.tv_usec;
+
+       return ctv;
+}
+
+#else /* !CONFIG_COMPAT */
 
 #define is_compat_task() (0)
 static inline bool in_compat_syscall(void) { return false; }
index 7e05c5e4e45cd49a82c8669089f307f9ac7e01b5..87165f06a3079dcfe507674e495859c5627f74ca 100644 (file)
@@ -31,7 +31,7 @@
 
 #define CPUFREQ_ETERNAL                        (-1)
 #define CPUFREQ_NAME_LEN               16
-/* Print length for names. Extra 1 space for accomodating '\n' in prints */
+/* Print length for names. Extra 1 space for accommodating '\n' in prints */
 #define CPUFREQ_NAME_PLEN              (CPUFREQ_NAME_LEN + 1)
 
 struct cpufreq_governor;
@@ -115,7 +115,7 @@ struct cpufreq_policy {
         *   guarantee that frequency can be changed on any CPU sharing the
         *   policy and that the change will affect all of the policy CPUs then.
         * - fast_switch_enabled is to be set by governors that support fast
-        *   freqnency switching with the help of cpufreq_enable_fast_switch().
+        *   frequency switching with the help of cpufreq_enable_fast_switch().
         */
        bool                    fast_switch_possible;
        bool                    fast_switch_enabled;
@@ -415,9 +415,6 @@ static inline void cpufreq_resume(void) {}
 /* Policy Notifiers  */
 #define CPUFREQ_ADJUST                 (0)
 #define CPUFREQ_NOTIFY                 (1)
-#define CPUFREQ_START                  (2)
-#define CPUFREQ_CREATE_POLICY          (3)
-#define CPUFREQ_REMOVE_POLICY          (4)
 
 #ifdef CONFIG_CPU_FREQ
 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
index d936a0021839cca651e19ec43e71b8f21cb69cf0..921acaaa16017979df0722fb9803b204d77c0be0 100644 (file)
@@ -8,9 +8,7 @@ enum cpuhp_state {
        CPUHP_CREATE_THREADS,
        CPUHP_PERF_PREPARE,
        CPUHP_PERF_X86_PREPARE,
-       CPUHP_PERF_X86_UNCORE_PREP,
        CPUHP_PERF_X86_AMD_UNCORE_PREP,
-       CPUHP_PERF_X86_RAPL_PREP,
        CPUHP_PERF_BFIN,
        CPUHP_PERF_POWER,
        CPUHP_PERF_SUPERH,
@@ -86,7 +84,6 @@ enum cpuhp_state {
        CPUHP_AP_IRQ_ARMADA_XP_STARTING,
        CPUHP_AP_IRQ_BCM2836_STARTING,
        CPUHP_AP_ARM_MVEBU_COHERENCY,
-       CPUHP_AP_PERF_X86_UNCORE_STARTING,
        CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
        CPUHP_AP_PERF_X86_STARTING,
        CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
index c717f5ea88cb7e7f04471f34ac14bcf9bc75a4d1..96f1e88b767c0058bee7f3dbcc0d175e68735cfe 100644 (file)
@@ -560,7 +560,7 @@ static inline void cpumask_copy(struct cpumask *dstp,
 static inline int cpumask_parse_user(const char __user *buf, int len,
                                     struct cpumask *dstp)
 {
-       return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids);
+       return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
 }
 
 /**
@@ -575,7 +575,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len,
                                     struct cpumask *dstp)
 {
        return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
-                                    nr_cpu_ids);
+                                    nr_cpumask_bits);
 }
 
 /**
@@ -590,7 +590,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
        char *nl = strchr(buf, '\n');
        unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf);
 
-       return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids);
+       return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
 }
 
 /**
@@ -602,7 +602,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
  */
 static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
 {
-       return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpu_ids);
+       return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
 }
 
 /**
@@ -649,11 +649,15 @@ static inline size_t cpumask_size(void)
  * used. Please use this_cpu_cpumask_var_t in those cases. The direct use
  * of this_cpu_ptr() or this_cpu_read() will lead to failures when the
  * other type of cpumask_var_t implementation is configured.
+ *
+ * Please also note that __cpumask_var_read_mostly can be used to declare
+ * a cpumask_var_t variable itself (not its content) as read mostly.
  */
 #ifdef CONFIG_CPUMASK_OFFSTACK
 typedef struct cpumask *cpumask_var_t;
 
-#define this_cpu_cpumask_var_ptr(x) this_cpu_read(x)
+#define this_cpu_cpumask_var_ptr(x)    this_cpu_read(x)
+#define __cpumask_var_read_mostly      __read_mostly
 
 bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
 bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
@@ -667,6 +671,7 @@ void free_bootmem_cpumask_var(cpumask_var_t mask);
 typedef struct cpumask cpumask_var_t[1];
 
 #define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)
+#define __cpumask_var_read_mostly
 
 static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
 {
index f2eb2ee535cabeffea2019cfcfdf113a9025f768..a691dc4ddc130ae02429541a6c4dc09bb25e2414 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef __LINUX_CPUTIME_H
 #define __LINUX_CPUTIME_H
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 #include <asm/cputime.h>
 
 #ifndef cputime_to_nsecs
@@ -8,9 +9,5 @@
        (cputime_to_usecs(__ct) * NSEC_PER_USEC)
 #endif
 
-#ifndef nsecs_to_cputime
-# define nsecs_to_cputime(__nsecs)     \
-       usecs_to_cputime((__nsecs) / NSEC_PER_USEC)
-#endif
-
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 #endif /* __LINUX_CPUTIME_H */
index a6ecb34cf547da29ad16edf8b109de2511d10cd5..2ecb3c46b20a2af689b1fcf4a6bea3c8924ded60 100644 (file)
@@ -5,6 +5,17 @@
  * Copyright (C) 1993 Linus Torvalds
  *
  * Delay routines, using a pre-computed "loops_per_jiffy" value.
+ *
+ * Please note that ndelay(), udelay() and mdelay() may return early for
+ * several reasons:
+ *  1. computed loops_per_jiffy too low (due to the time taken to
+ *     execute the timer interrupt.)
+ *  2. cache behaviour affecting the time it takes to execute the
+ *     loop function.
+ *  3. CPU clock rate changes.
+ *
+ * Please see this thread:
+ *   http://lists.openwall.net/linux-kernel/2011/01/09/56
  */
 
 #include <linux/kernel.h>
index 6cee17c2231384cdaaad3fac1792cc9ca6e798b7..00e60f79a9cc7e44df22093fa0550d26fbf2d7e9 100644 (file)
@@ -17,6 +17,7 @@
 #ifndef _LINUX_DELAYACCT_H
 #define _LINUX_DELAYACCT_H
 
+#include <uapi/linux/taskstats.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 
index 2de4e2eea180d133898980f87c659f86a7fb922b..e0acb0e5243b49552480ed8cfac2d037226f4304 100644 (file)
@@ -104,6 +104,8 @@ struct devfreq_dev_profile {
  * struct devfreq_governor - Devfreq policy governor
  * @node:              list node - contains registered devfreq governors
  * @name:              Governor's name
+ * @immutable:         Immutable flag for governor. If the value is 1,
+ *                     this govenror is never changeable to other governor.
  * @get_target_freq:   Returns desired operating frequency for the device.
  *                     Basically, get_target_freq will run
  *                     devfreq_dev_profile.get_dev_status() to get the
@@ -121,6 +123,7 @@ struct devfreq_governor {
        struct list_head node;
 
        const char name[DEVFREQ_NAME_LEN];
+       const unsigned int immutable;
        int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
        int (*event_handler)(struct devfreq *devfreq,
                                unsigned int event, void *data);
index 7f7e9a7e3839966cfcb19c49995213188ca1e2b3..5725c94b1f121ece268c0fd8558cb94770c2c9cd 100644 (file)
@@ -27,6 +27,7 @@ int iommu_dma_init(void);
 
 /* Domain management interface for IOMMU drivers */
 int iommu_get_dma_cookie(struct iommu_domain *domain);
+int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
 void iommu_put_dma_cookie(struct iommu_domain *domain);
 
 /* Setup call for arch DMA mapping code */
@@ -34,7 +35,8 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
                u64 size, struct device *dev);
 
 /* General helpers for DMA-API <-> IOMMU-API interaction */
-int dma_direction_to_prot(enum dma_data_direction dir, bool coherent);
+int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
+                    unsigned long attrs);
 
 /*
  * These implement the bulk of the relevant DMA mapping callbacks, but require
@@ -65,7 +67,6 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
                size_t size, enum dma_data_direction dir, unsigned long attrs);
 void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir, unsigned long attrs);
-int iommu_dma_supported(struct device *dev, u64 mask);
 int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
 
 /* The DMA API isn't _quite_ the whole story, though... */
@@ -86,6 +87,11 @@ static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
        return -ENODEV;
 }
 
+static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
+{
+       return -ENODEV;
+}
+
 static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
 {
 }
index 10c5a17b1f51ec33203e64df50282b7a85fde619..c24721a33b4c5d7816d1da34d3440a2d0d67be2c 100644 (file)
  */
 #define DMA_ATTR_NO_WARN       (1UL << 8)
 
+/*
+ * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
+ * accessible at an elevated privilege level (and ideally inaccessible or
+ * at least read-only at lesser-privileged levels).
+ */
+#define DMA_ATTR_PRIVILEGED            (1UL << 9)
+
 /*
  * A dma_addr_t can hold any valid DMA or bus address for the platform.
  * It can be given to a device to use as a DMA source or target.  A CPU cannot
index 07c52c0af62d1aa9525c4ecd66c5f59f0e75619d..5b6adf964248dd478dd6d266ccc59468d26951ce 100644 (file)
@@ -190,8 +190,8 @@ static inline char *mc_event_error_type(const unsigned int err_type)
  *                     part of the memory details to the memory controller.
  * @MEM_RMBS:          Rambus DRAM, used on a few Pentium III/IV controllers.
  * @MEM_DDR2:          DDR2 RAM, as described at JEDEC JESD79-2F.
- *                     Those memories are labed as "PC2-" instead of "PC" to
- *                     differenciate from DDR.
+ *                     Those memories are labeled as "PC2-" instead of "PC" to
+ *                     differentiate from DDR.
  * @MEM_FB_DDR2:       Fully-Buffered DDR2, as described at JEDEC Std No. 205
  *                     and JESD206.
  *                     Those memories are accessed per DIMM slot, and not by
index 051b21fedf681a39d102da1a16ddb2a33e0ff0f4..2fd3993c370b273c9ae8c0f58604a1280527f832 100644 (file)
@@ -1,20 +1,19 @@
 #ifndef _LINUX_EFI_BGRT_H
 #define _LINUX_EFI_BGRT_H
 
-#ifdef CONFIG_ACPI_BGRT
-
 #include <linux/acpi.h>
 
-void efi_bgrt_init(void);
+#ifdef CONFIG_ACPI_BGRT
+
+void efi_bgrt_init(struct acpi_table_header *table);
 
 /* The BGRT data itself; only valid if bgrt_image != NULL. */
-extern void *bgrt_image;
 extern size_t bgrt_image_size;
-extern struct acpi_table_bgrt *bgrt_tab;
+extern struct acpi_table_bgrt bgrt_tab;
 
 #else /* !CONFIG_ACPI_BGRT */
 
-static inline void efi_bgrt_init(void) {}
+static inline void efi_bgrt_init(struct acpi_table_header *table) {}
 
 #endif /* !CONFIG_ACPI_BGRT */
 
index 5b1af30ece55828e655b2bf02a149fd71354f3eb..94d34e0be24f55477573f48806de2d2817fe9722 100644 (file)
@@ -509,24 +509,6 @@ typedef struct {
        u64 query_variable_info;
 } efi_runtime_services_64_t;
 
-typedef struct {
-       efi_table_hdr_t hdr;
-       void *get_time;
-       void *set_time;
-       void *get_wakeup_time;
-       void *set_wakeup_time;
-       void *set_virtual_address_map;
-       void *convert_pointer;
-       void *get_variable;
-       void *get_next_variable;
-       void *set_variable;
-       void *get_next_high_mono_count;
-       void *reset_system;
-       void *update_capsule;
-       void *query_capsule_caps;
-       void *query_variable_info;
-} efi_runtime_services_t;
-
 typedef efi_status_t efi_get_time_t (efi_time_t *tm, efi_time_cap_t *tc);
 typedef efi_status_t efi_set_time_t (efi_time_t *tm);
 typedef efi_status_t efi_get_wakeup_time_t (efi_bool_t *enabled, efi_bool_t *pending,
@@ -561,6 +543,24 @@ typedef efi_status_t efi_query_variable_store_t(u32 attributes,
                                                unsigned long size,
                                                bool nonblocking);
 
+typedef struct {
+       efi_table_hdr_t                 hdr;
+       efi_get_time_t                  *get_time;
+       efi_set_time_t                  *set_time;
+       efi_get_wakeup_time_t           *get_wakeup_time;
+       efi_set_wakeup_time_t           *set_wakeup_time;
+       efi_set_virtual_address_map_t   *set_virtual_address_map;
+       void                            *convert_pointer;
+       efi_get_variable_t              *get_variable;
+       efi_get_next_variable_t         *get_next_variable;
+       efi_set_variable_t              *set_variable;
+       efi_get_next_high_mono_count_t  *get_next_high_mono_count;
+       efi_reset_system_t              *reset_system;
+       efi_update_capsule_t            *update_capsule;
+       efi_query_capsule_caps_t        *query_capsule_caps;
+       efi_query_variable_info_t       *query_variable_info;
+} efi_runtime_services_t;
+
 void efi_native_runtime_setup(void);
 
 /*
@@ -611,6 +611,9 @@ void efi_native_runtime_setup(void);
 #define EFI_CONSOLE_OUT_DEVICE_GUID            EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4,  0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
 #define APPLE_PROPERTIES_PROTOCOL_GUID         EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb,  0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0)
 
+#define EFI_IMAGE_SECURITY_DATABASE_GUID       EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596,  0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f)
+#define EFI_SHIM_LOCK_GUID                     EFI_GUID(0x605dab50, 0xe046, 0x4300,  0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23)
+
 /*
  * This GUID is used to pass to the kernel proper the struct screen_info
  * structure that was populated by the stub based on the GOP protocol instance
@@ -1065,6 +1068,7 @@ extern int __init efi_setup_pcdp_console(char *);
 #define EFI_ARCH_1             7       /* First arch-specific bit */
 #define EFI_DBG                        8       /* Print additional debug info at runtime */
 #define EFI_NX_PE_DATA         9       /* Can runtime data regions be mapped non-executable? */
+#define EFI_MEM_ATTR           10      /* Did firmware publish an EFI_MEMORY_ATTRIBUTES table? */
 
 #ifdef CONFIG_EFI
 /*
@@ -1240,17 +1244,17 @@ struct efivar_entry {
        bool deleting;
 };
 
-struct efi_simple_text_output_protocol_32 {
+typedef struct {
        u32 reset;
        u32 output_string;
        u32 test_string;
-};
+} efi_simple_text_output_protocol_32_t;
 
-struct efi_simple_text_output_protocol_64 {
+typedef struct {
        u64 reset;
        u64 output_string;
        u64 test_string;
-};
+} efi_simple_text_output_protocol_64_t;
 
 struct efi_simple_text_output_protocol {
        void *reset;
@@ -1476,6 +1480,14 @@ efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
 bool efi_runtime_disabled(void);
 extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
 
+enum efi_secureboot_mode {
+       efi_secureboot_mode_unset,
+       efi_secureboot_mode_unknown,
+       efi_secureboot_mode_disabled,
+       efi_secureboot_mode_enabled,
+};
+enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table);
+
 /*
  * Arch code can implement the following three template macros, avoiding
  * reptition for the void/non-void return cases of {__,}efi_call_virt():
index 2a0f61fbc7310e61f5927c31250e208d217c3e26..1a1dfdb2a5c6d8806d11e2cd58722e304f293054 100644 (file)
@@ -43,12 +43,19 @@ extern struct module __this_module;
 #ifdef CONFIG_MODVERSIONS
 /* Mark the CRC weak since genksyms apparently decides not to
  * generate a checksums for some symbols */
+#if defined(CONFIG_MODULE_REL_CRCS)
 #define __CRC_SYMBOL(sym, sec)                                         \
-       extern __visible void *__crc_##sym __attribute__((weak));       \
-       static const unsigned long __kcrctab_##sym                      \
-       __used                                                          \
-       __attribute__((section("___kcrctab" sec "+" #sym), used))       \
-       = (unsigned long) &__crc_##sym;
+       asm("   .section \"___kcrctab" sec "+" #sym "\", \"a\"  \n"     \
+           "   .weak   " VMLINUX_SYMBOL_STR(__crc_##sym) "     \n"     \
+           "   .long   " VMLINUX_SYMBOL_STR(__crc_##sym) " - . \n"     \
+           "   .previous                                       \n");
+#else
+#define __CRC_SYMBOL(sym, sec)                                         \
+       asm("   .section \"___kcrctab" sec "+" #sym "\", \"a\"  \n"     \
+           "   .weak   " VMLINUX_SYMBOL_STR(__crc_##sym) "     \n"     \
+           "   .long   " VMLINUX_SYMBOL_STR(__crc_##sym) "     \n"     \
+           "   .previous                                       \n");
+#endif
 #else
 #define __CRC_SYMBOL(sym, sec)
 #endif
index 13ba552e6c094e82ee8b952b26d9f0bf5eeebc39..4c467ef50159db533ecb567a86eeaf6e1e81e632 100644 (file)
@@ -360,6 +360,7 @@ struct fscache_object {
 #define FSCACHE_OBJECT_IS_AVAILABLE    5       /* T if object has become active */
 #define FSCACHE_OBJECT_RETIRED         6       /* T if object was retired on relinquishment */
 #define FSCACHE_OBJECT_KILLED_BY_CACHE 7       /* T if object was killed by the cache */
+#define FSCACHE_OBJECT_RUN_AFTER_DEAD  8       /* T if object has been dispatched after death */
 
        struct list_head        cache_link;     /* link in cache->object_list */
        struct hlist_node       cookie_link;    /* link in cookie->backing_objects */
index 3f9778cbc79d0b1bb3b73e0a12695b722fa7a179..c332f0a4560794225a538f2a45ed77f936472c46 100644 (file)
@@ -733,8 +733,12 @@ struct fsl_ifc_nand {
        __be32 nand_erattr1;
        u32 res19[0x10];
        __be32 nand_fsr;
-       u32 res20[0x3];
-       __be32 nand_eccstat[6];
+       u32 res20;
+       /* The V1 nand_eccstat is actually 4 words that overlaps the
+        * V2 nand_eccstat.
+        */
+       __be32 v1_nand_eccstat[2];
+       __be32 v2_nand_eccstat[6];
        u32 res21[0x1c];
        __be32 nanndcr;
        u32 res22[0x2];
index c2748accea71aa006268afebe65898fcc9f6d033..e973faba69dc5c90586aa97511d860178f31ff2e 100644 (file)
@@ -274,37 +274,67 @@ void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
                struct irq_chip *irqchip,
                int parent_irq);
 
-int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
+                            struct irq_chip *irqchip,
+                            unsigned int first_irq,
+                            irq_flow_handler_t handler,
+                            unsigned int type,
+                            bool nested,
+                            struct lock_class_key *lock_key);
+
+#ifdef CONFIG_LOCKDEP
+
+/*
+ * Lockdep requires that each irqchip instance be created with a
+ * unique key so as to avoid unnecessary warnings. These upfront
+ * boilerplate static inlines provide such a key for each
+ * unique instance.
+ */
+static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+                                      struct irq_chip *irqchip,
+                                      unsigned int first_irq,
+                                      irq_flow_handler_t handler,
+                                      unsigned int type)
+{
+       static struct lock_class_key key;
+
+       return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+                                       handler, type, false, &key);
+}
+
+static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
                          struct irq_chip *irqchip,
                          unsigned int first_irq,
                          irq_flow_handler_t handler,
-                         unsigned int type,
-                         bool nested,
-                         struct lock_class_key *lock_key);
+                         unsigned int type)
+{
+
+       static struct lock_class_key key;
+
+       return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+                                       handler, type, true, &key);
+}
+#else
+static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+                                      struct irq_chip *irqchip,
+                                      unsigned int first_irq,
+                                      irq_flow_handler_t handler,
+                                      unsigned int type)
+{
+       return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+                                       handler, type, false, NULL);
+}
 
-/* FIXME: I assume threaded IRQchips do not have the lockdep problem */
 static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
                          struct irq_chip *irqchip,
                          unsigned int first_irq,
                          irq_flow_handler_t handler,
                          unsigned int type)
 {
-       return _gpiochip_irqchip_add(gpiochip, irqchip, first_irq,
-                                    handler, type, true, NULL);
+       return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+                                       handler, type, true, NULL);
 }
-
-#ifdef CONFIG_LOCKDEP
-#define gpiochip_irqchip_add(...)                              \
-(                                                              \
-       ({                                                      \
-               static struct lock_class_key _key;              \
-               _gpiochip_irqchip_add(__VA_ARGS__, false, &_key); \
-       })                                                      \
-)
-#else
-#define gpiochip_irqchip_add(...)                              \
-       _gpiochip_irqchip_add(__VA_ARGS__, false, NULL)
-#endif
+#endif /* CONFIG_LOCKDEP */
 
 #endif /* CONFIG_GPIOLIB_IRQCHIP */
 
index cdab81ba29f899934794f1e7924f3dbb6d469d5f..e52b427223baa89f6167cbc0d285543a120cdf9f 100644 (file)
@@ -88,12 +88,6 @@ enum hrtimer_restart {
  * @base:      pointer to the timer base (per cpu and per clock)
  * @state:     state information (See bit values above)
  * @is_rel:    Set if the timer was armed relative
- * @start_pid:  timer statistics field to store the pid of the task which
- *             started the timer
- * @start_site:        timer statistics field to store the site where the timer
- *             was started
- * @start_comm: timer statistics field to store the name of the process which
- *             started the timer
  *
  * The hrtimer structure must be initialized by hrtimer_init()
  */
@@ -104,11 +98,6 @@ struct hrtimer {
        struct hrtimer_clock_base       *base;
        u8                              state;
        u8                              is_rel;
-#ifdef CONFIG_TIMER_STATS
-       int                             start_pid;
-       void                            *start_site;
-       char                            start_comm[16];
-#endif
 };
 
 /**
index 42fe43fb0c80605f9553c746f04310f43914f683..183efde54269e18c5d4d1eda7dc448717fe85800 100644 (file)
@@ -128,6 +128,7 @@ struct hv_ring_buffer_info {
        u32 ring_data_startoffset;
        u32 priv_write_index;
        u32 priv_read_index;
+       u32 cached_read_index;
 };
 
 /*
@@ -180,6 +181,19 @@ static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi)
        return write;
 }
 
+static inline u32 hv_get_cached_bytes_to_write(
+       const struct hv_ring_buffer_info *rbi)
+{
+       u32 read_loc, write_loc, dsize, write;
+
+       dsize = rbi->ring_datasize;
+       read_loc = rbi->cached_read_index;
+       write_loc = rbi->ring_buffer->write_index;
+
+       write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
+               read_loc - write_loc;
+       return write;
+}
 /*
  * VMBUS version is 32 bit entity broken up into
  * two 16 bit quantities: major_number. minor_number.
@@ -1488,7 +1502,7 @@ hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
 
 static inline  void hv_signal_on_read(struct vmbus_channel *channel)
 {
-       u32 cur_write_sz;
+       u32 cur_write_sz, cached_write_sz;
        u32 pending_sz;
        struct hv_ring_buffer_info *rbi = &channel->inbound;
 
@@ -1512,12 +1526,24 @@ static inline  void hv_signal_on_read(struct vmbus_channel *channel)
 
        cur_write_sz = hv_get_bytes_to_write(rbi);
 
-       if (cur_write_sz >= pending_sz)
+       if (cur_write_sz < pending_sz)
+               return;
+
+       cached_write_sz = hv_get_cached_bytes_to_write(rbi);
+       if (cached_write_sz < pending_sz)
                vmbus_setevent(channel);
 
        return;
 }
 
+static inline void
+init_cached_read_index(struct vmbus_channel *channel)
+{
+       struct hv_ring_buffer_info *rbi = &channel->inbound;
+
+       rbi->cached_read_index = rbi->ring_buffer->read_index;
+}
+
 /*
  * An API to support in-place processing of incoming VMBUS packets.
  */
@@ -1569,6 +1595,8 @@ static inline void put_pkt_raw(struct vmbus_channel *channel,
  * This call commits the read index and potentially signals the host.
  * Here is the pattern for using the "in-place" consumption APIs:
  *
+ * init_cached_read_index();
+ *
  * while (get_next_pkt_raw() {
  *     process the packet "in-place";
  *     put_pkt_raw();
index 325f649d77ff24bb65a26be80964a8bea04671a8..3a85d61f761422d942502f533c8177d67fc7f323 100644 (file)
@@ -42,6 +42,27 @@ extern struct fs_struct init_fs;
 #define INIT_PREV_CPUTIME(x)
 #endif
 
+#ifdef CONFIG_POSIX_TIMERS
+#define INIT_POSIX_TIMERS(s)                                           \
+       .posix_timers = LIST_HEAD_INIT(s.posix_timers),
+#define INIT_CPU_TIMERS(s)                                             \
+       .cpu_timers = {                                                 \
+               LIST_HEAD_INIT(s.cpu_timers[0]),                        \
+               LIST_HEAD_INIT(s.cpu_timers[1]),                        \
+               LIST_HEAD_INIT(s.cpu_timers[2]),                        \
+       },
+#define INIT_CPUTIMER(s)                                               \
+       .cputimer       = {                                             \
+               .cputime_atomic = INIT_CPUTIME_ATOMIC,                  \
+               .running        = false,                                \
+               .checking_timer = false,                                \
+       },
+#else
+#define INIT_POSIX_TIMERS(s)
+#define INIT_CPU_TIMERS(s)
+#define INIT_CPUTIMER(s)
+#endif
+
 #define INIT_SIGNALS(sig) {                                            \
        .nr_threads     = 1,                                            \
        .thread_head    = LIST_HEAD_INIT(init_task.thread_node),        \
@@ -49,14 +70,10 @@ extern struct fs_struct init_fs;
        .shared_pending = {                                             \
                .list = LIST_HEAD_INIT(sig.shared_pending.list),        \
                .signal =  {{0}}},                                      \
-       .posix_timers    = LIST_HEAD_INIT(sig.posix_timers),            \
-       .cpu_timers     = INIT_CPU_TIMERS(sig.cpu_timers),              \
+       INIT_POSIX_TIMERS(sig)                                          \
+       INIT_CPU_TIMERS(sig)                                            \
        .rlim           = INIT_RLIMITS,                                 \
-       .cputimer       = {                                             \
-               .cputime_atomic = INIT_CPUTIME_ATOMIC,                  \
-               .running        = false,                                \
-               .checking_timer = false,                                \
-       },                                                              \
+       INIT_CPUTIMER(sig)                                              \
        INIT_PREV_CPUTIME(sig)                                          \
        .cred_guard_mutex =                                             \
                 __MUTEX_INITIALIZER(sig.cred_guard_mutex),             \
@@ -247,7 +264,7 @@ extern struct task_group root_task_group;
        .blocked        = {{0}},                                        \
        .alloc_lock     = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock),         \
        .journal_info   = NULL,                                         \
-       .cpu_timers     = INIT_CPU_TIMERS(tsk.cpu_timers),              \
+       INIT_CPU_TIMERS(tsk)                                            \
        .pi_lock        = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock),        \
        .timer_slack_ns = 50000, /* 50 usec default slack */            \
        .pids = {                                                       \
@@ -274,13 +291,6 @@ extern struct task_group root_task_group;
 }
 
 
-#define INIT_CPU_TIMERS(cpu_timers)                                    \
-{                                                                      \
-       LIST_HEAD_INIT(cpu_timers[0]),                                  \
-       LIST_HEAD_INIT(cpu_timers[1]),                                  \
-       LIST_HEAD_INIT(cpu_timers[2]),                                  \
-}
-
 /* Attach to the init_task data structure for proper alignment */
 #define __init_task_data __attribute__((__section__(".data..init_task")))
 
index d49e26c6cdc7b5e48e41591f7c73e74d200441a8..c573a52ae440e83894709328820bacb79391d15c 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/dma_remapping.h>
 #include <linux/mmu_notifier.h>
 #include <linux/list.h>
+#include <linux/iommu.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
 
@@ -153,8 +154,8 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 #define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
 #define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
 #define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
-#define DMA_TLB_IIRG(type) ((type >> 60) & 7)
-#define DMA_TLB_IAIG(val) (((val) >> 57) & 7)
+#define DMA_TLB_IIRG(type) ((type >> 60) & 3)
+#define DMA_TLB_IAIG(val) (((val) >> 57) & 3)
 #define DMA_TLB_READ_DRAIN (((u64)1) << 49)
 #define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
 #define DMA_TLB_DID(id)        (((u64)((id) & 0xffff)) << 32)
@@ -164,9 +165,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 
 /* INVALID_DESC */
 #define DMA_CCMD_INVL_GRANU_OFFSET  61
-#define DMA_ID_TLB_GLOBAL_FLUSH        (((u64)1) << 3)
-#define DMA_ID_TLB_DSI_FLUSH   (((u64)2) << 3)
-#define DMA_ID_TLB_PSI_FLUSH   (((u64)3) << 3)
+#define DMA_ID_TLB_GLOBAL_FLUSH        (((u64)1) << 4)
+#define DMA_ID_TLB_DSI_FLUSH   (((u64)2) << 4)
+#define DMA_ID_TLB_PSI_FLUSH   (((u64)3) << 4)
 #define DMA_ID_TLB_READ_DRAIN  (((u64)1) << 7)
 #define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6)
 #define DMA_ID_TLB_DID(id)     (((u64)((id & 0xffff) << 16)))
@@ -316,8 +317,8 @@ enum {
 #define QI_DEV_EIOTLB_SIZE     (((u64)1) << 11)
 #define QI_DEV_EIOTLB_GLOB(g)  ((u64)g)
 #define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
-#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
-#define QI_DEV_EIOTLB_QDEP(qd) (((qd) & 0x1f) << 16)
+#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
+#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
 #define QI_DEV_EIOTLB_MAX_INVS 32
 
 #define QI_PGRP_IDX(idx)       (((u64)(idx)) << 55)
@@ -439,7 +440,7 @@ struct intel_iommu {
        struct irq_domain *ir_domain;
        struct irq_domain *ir_msi_domain;
 #endif
-       struct device   *iommu_dev; /* IOMMU-sysfs device */
+       struct iommu_device iommu;  /* IOMMU core code handle */
        int             node;
        u32             flags;      /* Software defined flags */
 };
diff --git a/include/linux/intel_pmic_gpio.h b/include/linux/intel_pmic_gpio.h
deleted file mode 100644 (file)
index 920109a..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef LINUX_INTEL_PMIC_H
-#define LINUX_INTEL_PMIC_H
-
-struct intel_pmic_gpio_platform_data {
-       /* the first IRQ of the chip */
-       unsigned        irq_base;
-       /* number assigned to the first GPIO */
-       unsigned        gpio_base;
-       /* sram address for gpiointr register, the langwell chip will map
-        * the PMIC spi GPIO expander's GPIOINTR register in sram.
-        */
-       unsigned        gpiointr;
-};
-
-#endif
index 0ff5111f69595bf9ec93c93677a80816066da820..6a6de187ddc0ff1e0e737f94261211b7ea4408e9 100644 (file)
 #define IOMMU_CACHE    (1 << 2) /* DMA cache coherency */
 #define IOMMU_NOEXEC   (1 << 3)
 #define IOMMU_MMIO     (1 << 4) /* e.g. things like MSI doorbells */
+/*
+ * This is to make the IOMMU API setup privileged
+ * mappings accessible by the master only at higher
+ * privileged execution level and inaccessible at
+ * less privileged levels.
+ */
+#define IOMMU_PRIV     (1 << 5)
 
 struct iommu_ops;
 struct iommu_group;
@@ -117,18 +124,25 @@ enum iommu_attr {
        DOMAIN_ATTR_MAX,
 };
 
+/* These are the possible reserved region types */
+#define IOMMU_RESV_DIRECT      (1 << 0)
+#define IOMMU_RESV_RESERVED    (1 << 1)
+#define IOMMU_RESV_MSI         (1 << 2)
+
 /**
- * struct iommu_dm_region - descriptor for a direct mapped memory region
+ * struct iommu_resv_region - descriptor for a reserved memory region
  * @list: Linked list pointers
  * @start: System physical start address of the region
  * @length: Length of the region in bytes
  * @prot: IOMMU Protection flags (READ/WRITE/...)
+ * @type: Type of the reserved region
  */
-struct iommu_dm_region {
+struct iommu_resv_region {
        struct list_head        list;
        phys_addr_t             start;
        size_t                  length;
        int                     prot;
+       int                     type;
 };
 
 #ifdef CONFIG_IOMMU_API
@@ -150,9 +164,9 @@ struct iommu_dm_region {
  * @device_group: find iommu group for a particular device
  * @domain_get_attr: Query domain attributes
  * @domain_set_attr: Change domain attributes
- * @get_dm_regions: Request list of direct mapping requirements for a device
- * @put_dm_regions: Free list of direct mapping requirements for a device
- * @apply_dm_region: Temporary helper call-back for iova reserved ranges
+ * @get_resv_regions: Request list of reserved regions for a device
+ * @put_resv_regions: Free list of reserved regions for a device
+ * @apply_resv_region: Temporary helper call-back for iova reserved ranges
  * @domain_window_enable: Configure and enable a particular window for a domain
  * @domain_window_disable: Disable a particular window for a domain
  * @domain_set_windows: Set the number of windows for a domain
@@ -184,11 +198,12 @@ struct iommu_ops {
        int (*domain_set_attr)(struct iommu_domain *domain,
                               enum iommu_attr attr, void *data);
 
-       /* Request/Free a list of direct mapping requirements for a device */
-       void (*get_dm_regions)(struct device *dev, struct list_head *list);
-       void (*put_dm_regions)(struct device *dev, struct list_head *list);
-       void (*apply_dm_region)(struct device *dev, struct iommu_domain *domain,
-                               struct iommu_dm_region *region);
+       /* Request/Free a list of reserved regions for a device */
+       void (*get_resv_regions)(struct device *dev, struct list_head *list);
+       void (*put_resv_regions)(struct device *dev, struct list_head *list);
+       void (*apply_resv_region)(struct device *dev,
+                                 struct iommu_domain *domain,
+                                 struct iommu_resv_region *region);
 
        /* Window handling functions */
        int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
@@ -204,6 +219,42 @@ struct iommu_ops {
        unsigned long pgsize_bitmap;
 };
 
+/**
+ * struct iommu_device - IOMMU core representation of one IOMMU hardware
+ *                      instance
+ * @list: Used by the iommu-core to keep a list of registered iommus
+ * @ops: iommu-ops for talking to this iommu
+ * @dev: struct device for sysfs handling
+ */
+struct iommu_device {
+       struct list_head list;
+       const struct iommu_ops *ops;
+       struct fwnode_handle *fwnode;
+       struct device dev;
+};
+
+int  iommu_device_register(struct iommu_device *iommu);
+void iommu_device_unregister(struct iommu_device *iommu);
+int  iommu_device_sysfs_add(struct iommu_device *iommu,
+                           struct device *parent,
+                           const struct attribute_group **groups,
+                           const char *fmt, ...) __printf(4, 5);
+void iommu_device_sysfs_remove(struct iommu_device *iommu);
+int  iommu_device_link(struct iommu_device   *iommu, struct device *link);
+void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
+
+static inline void iommu_device_set_ops(struct iommu_device *iommu,
+                                       const struct iommu_ops *ops)
+{
+       iommu->ops = ops;
+}
+
+static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
+                                          struct fwnode_handle *fwnode)
+{
+       iommu->fwnode = fwnode;
+}
+
 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE          1 /* Device added */
 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE          2 /* Pre Device removed */
 #define IOMMU_GROUP_NOTIFY_BIND_DRIVER         3 /* Pre Driver bind */
@@ -233,9 +284,13 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io
 extern void iommu_set_fault_handler(struct iommu_domain *domain,
                        iommu_fault_handler_t handler, void *token);
 
-extern void iommu_get_dm_regions(struct device *dev, struct list_head *list);
-extern void iommu_put_dm_regions(struct device *dev, struct list_head *list);
+extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
+extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
 extern int iommu_request_dm_for_dev(struct device *dev);
+extern struct iommu_resv_region *
+iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, int type);
+extern int iommu_get_group_resv_regions(struct iommu_group *group,
+                                       struct list_head *head);
 
 extern int iommu_attach_group(struct iommu_domain *domain,
                              struct iommu_group *group);
@@ -267,12 +322,6 @@ extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
                                 void *data);
 extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
                                 void *data);
-struct device *iommu_device_create(struct device *parent, void *drvdata,
-                                  const struct attribute_group **groups,
-                                  const char *fmt, ...) __printf(4, 5);
-void iommu_device_destroy(struct device *dev);
-int iommu_device_link(struct device *dev, struct device *link);
-void iommu_device_unlink(struct device *dev, struct device *link);
 
 /* Window handling function prototypes */
 extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
@@ -352,15 +401,14 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
                      const struct iommu_ops *ops);
 void iommu_fwspec_free(struct device *dev);
 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
-void iommu_register_instance(struct fwnode_handle *fwnode,
-                            const struct iommu_ops *ops);
-const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode);
+const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);
 
 #else /* CONFIG_IOMMU_API */
 
 struct iommu_ops {};
 struct iommu_group {};
 struct iommu_fwspec {};
+struct iommu_device {};
 
 static inline bool iommu_present(struct bus_type *bus)
 {
@@ -443,16 +491,22 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
 {
 }
 
-static inline void iommu_get_dm_regions(struct device *dev,
+static inline void iommu_get_resv_regions(struct device *dev,
                                        struct list_head *list)
 {
 }
 
-static inline void iommu_put_dm_regions(struct device *dev,
+static inline void iommu_put_resv_regions(struct device *dev,
                                        struct list_head *list)
 {
 }
 
+static inline int iommu_get_group_resv_regions(struct iommu_group *group,
+                                              struct list_head *head)
+{
+       return -ENODEV;
+}
+
 static inline int iommu_request_dm_for_dev(struct device *dev)
 {
        return -ENODEV;
@@ -546,15 +600,34 @@ static inline int iommu_domain_set_attr(struct iommu_domain *domain,
        return -EINVAL;
 }
 
-static inline struct device *iommu_device_create(struct device *parent,
-                                       void *drvdata,
-                                       const struct attribute_group **groups,
-                                       const char *fmt, ...)
+static inline int  iommu_device_register(struct iommu_device *iommu)
+{
+       return -ENODEV;
+}
+
+static inline void iommu_device_set_ops(struct iommu_device *iommu,
+                                       const struct iommu_ops *ops)
+{
+}
+
+static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
+                                          struct fwnode_handle *fwnode)
+{
+}
+
+static inline void iommu_device_unregister(struct iommu_device *iommu)
 {
-       return ERR_PTR(-ENODEV);
 }
 
-static inline void iommu_device_destroy(struct device *dev)
+static inline int  iommu_device_sysfs_add(struct iommu_device *iommu,
+                                         struct device *parent,
+                                         const struct attribute_group **groups,
+                                         const char *fmt, ...)
+{
+       return -ENODEV;
+}
+
+static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
 {
 }
 
@@ -584,13 +657,8 @@ static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
        return -ENODEV;
 }
 
-static inline void iommu_register_instance(struct fwnode_handle *fwnode,
-                                          const struct iommu_ops *ops)
-{
-}
-
 static inline
-const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode)
+const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
 {
        return NULL;
 }
index e79875574b393f33ed183fdc8f44277a49c26ba8..f887351aa80e787ddde2883f215cded0d18b6274 100644 (file)
@@ -184,6 +184,7 @@ struct irq_data {
  *
  * IRQD_TRIGGER_MASK           - Mask for the trigger type bits
  * IRQD_SETAFFINITY_PENDING    - Affinity setting is pending
+ * IRQD_ACTIVATED              - Interrupt has already been activated
  * IRQD_NO_BALANCING           - Balancing disabled for this IRQ
  * IRQD_PER_CPU                        - Interrupt is per cpu
  * IRQD_AFFINITY_SET           - Interrupt affinity was set
@@ -202,6 +203,7 @@ struct irq_data {
 enum {
        IRQD_TRIGGER_MASK               = 0xf,
        IRQD_SETAFFINITY_PENDING        = (1 <<  8),
+       IRQD_ACTIVATED                  = (1 <<  9),
        IRQD_NO_BALANCING               = (1 << 10),
        IRQD_PER_CPU                    = (1 << 11),
        IRQD_AFFINITY_SET               = (1 << 12),
@@ -312,6 +314,21 @@ static inline bool irqd_affinity_is_managed(struct irq_data *d)
        return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
 }
 
+static inline bool irqd_is_activated(struct irq_data *d)
+{
+       return __irqd_to_state(d) & IRQD_ACTIVATED;
+}
+
+static inline void irqd_set_activated(struct irq_data *d)
+{
+       __irqd_to_state(d) |= IRQD_ACTIVATED;
+}
+
+static inline void irqd_clr_activated(struct irq_data *d)
+{
+       __irqd_to_state(d) &= ~IRQD_ACTIVATED;
+}
+
 #undef __irqd_to_state
 
 static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
@@ -715,6 +732,10 @@ unsigned int arch_dynirq_lower_bound(unsigned int from);
 int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
                      struct module *owner, const struct cpumask *affinity);
 
+int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
+                          unsigned int cnt, int node, struct module *owner,
+                          const struct cpumask *affinity);
+
 /* use macros to avoid needing export.h for THIS_MODULE */
 #define irq_alloc_descs(irq, from, cnt, node)  \
        __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL)
@@ -731,6 +752,21 @@ int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
 #define irq_alloc_descs_from(from, cnt, node)  \
        irq_alloc_descs(-1, from, cnt, node)
 
+#define devm_irq_alloc_descs(dev, irq, from, cnt, node)                \
+       __devm_irq_alloc_descs(dev, irq, from, cnt, node, THIS_MODULE, NULL)
+
+#define devm_irq_alloc_desc(dev, node)                         \
+       devm_irq_alloc_descs(dev, -1, 0, 1, node)
+
+#define devm_irq_alloc_desc_at(dev, at, node)                  \
+       devm_irq_alloc_descs(dev, at, at, 1, node)
+
+#define devm_irq_alloc_desc_from(dev, from, node)              \
+       devm_irq_alloc_descs(dev, -1, from, 1, node)
+
+#define devm_irq_alloc_descs_from(dev, from, cnt, node)                \
+       devm_irq_alloc_descs(dev, -1, from, cnt, node)
+
 void irq_free_descs(unsigned int irq, unsigned int cnt);
 static inline void irq_free_desc(unsigned int irq)
 {
index e808f8ae6f143b9fba041aee30af864a91708186..725e86b506f313e0070767a946fc18b60e5a9193 100644 (file)
@@ -73,7 +73,6 @@
 
 #define GICD_TYPER_ID_BITS(typer)      ((((typer) >> 19) & 0x1f) + 1)
 #define GICD_TYPER_IRQS(typer)         ((((typer) & 0x1f) + 1) * 32)
-#define GICD_TYPER_LPIS                        (1U << 17)
 
 #define GICD_IROUTER_SPI_MODE_ONE      (0U << 31)
 #define GICD_IROUTER_SPI_MODE_ANY      (1U << 31)
 #define GITS_BASER_TYPE_NONE           0
 #define GITS_BASER_TYPE_DEVICE         1
 #define GITS_BASER_TYPE_VCPU           2
-#define GITS_BASER_TYPE_CPU            3
+#define GITS_BASER_TYPE_RESERVED3      3
 #define GITS_BASER_TYPE_COLLECTION     4
 #define GITS_BASER_TYPE_RESERVED5      5
 #define GITS_BASER_TYPE_RESERVED6      6
 #define GITS_CMD_MAPD                  0x08
 #define GITS_CMD_MAPC                  0x09
 #define GITS_CMD_MAPTI                 0x0a
-/* older GIC documentation used MAPVI for this command */
-#define GITS_CMD_MAPVI                 GITS_CMD_MAPTI
 #define GITS_CMD_MAPI                  0x0b
 #define GITS_CMD_MOVI                  0x01
 #define GITS_CMD_DISCARD               0x0f
index ffb84604c1de0cbcd26fd23d32c3f5a9587acbd0..188eced6813eddb9c313fdb59016b972835e7674 100644 (file)
@@ -183,6 +183,12 @@ enum {
        /* Irq domain is an IPI domain with single virq */
        IRQ_DOMAIN_FLAG_IPI_SINGLE      = (1 << 3),
 
+       /* Irq domain implements MSIs */
+       IRQ_DOMAIN_FLAG_MSI             = (1 << 4),
+
+       /* Irq domain implements MSI remapping */
+       IRQ_DOMAIN_FLAG_MSI_REMAP       = (1 << 5),
+
        /*
         * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
         * for implementation specific purposes and ignored by the
@@ -216,6 +222,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
                                         void *host_data);
 extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
                                                   enum irq_domain_bus_token bus_token);
+extern bool irq_domain_check_msi_remap(void);
 extern void irq_set_default_host(struct irq_domain *host);
 extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
                                  irq_hw_number_t hwirq, int node,
@@ -446,6 +453,19 @@ static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
 {
        return domain->flags & IRQ_DOMAIN_FLAG_IPI_SINGLE;
 }
+
+static inline bool irq_domain_is_msi(struct irq_domain *domain)
+{
+       return domain->flags & IRQ_DOMAIN_FLAG_MSI;
+}
+
+static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
+{
+       return domain->flags & IRQ_DOMAIN_FLAG_MSI_REMAP;
+}
+
+extern bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain);
+
 #else  /* CONFIG_IRQ_DOMAIN_HIERARCHY */
 static inline void irq_domain_activate_irq(struct irq_data *data) { }
 static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
@@ -477,6 +497,22 @@ static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
 {
        return false;
 }
+
+static inline bool irq_domain_is_msi(struct irq_domain *domain)
+{
+       return false;
+}
+
+static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
+{
+       return false;
+}
+
+static inline bool
+irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
+{
+       return false;
+}
 #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
 
 #else /* CONFIG_IRQ_DOMAIN */
index 589d14e970ad8da596684e818dca5d3b44e514e3..624215cebee5235a3375549f62e026b3c7e41f74 100644 (file)
@@ -293,6 +293,8 @@ static inline u64 jiffies_to_nsecs(const unsigned long j)
        return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
 }
 
+extern u64 jiffies64_to_nsecs(u64 j);
+
 extern unsigned long __msecs_to_jiffies(const unsigned int m);
 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
 /*
index a0547c571800e7ca77f9a7ffed9412a16bee9262..b63d6b7b0db0a295432320d432da5005f718a0f2 100644 (file)
@@ -402,6 +402,6 @@ extern bool ____wrong_branch_error(void);
 #define static_branch_enable(x)                static_key_enable(&(x)->key)
 #define static_branch_disable(x)       static_key_disable(&(x)->key)
 
-#endif /* _LINUX_JUMP_LABEL_H */
-
 #endif /* __ASSEMBLY__ */
+
+#endif /* _LINUX_JUMP_LABEL_H */
index 00f776816aa355a8d1bc101632114c518a526350..66be8b6becebec1d8a79d65f925a3f3096d22427 100644 (file)
@@ -9,7 +9,6 @@
 #include <linux/sched.h>
 #include <linux/vtime.h>
 #include <asm/irq.h>
-#include <linux/cputime.h>
 
 /*
  * 'kernel_stat.h' contains the definitions needed for doing
@@ -78,15 +77,18 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
        return kstat_cpu(cpu).irqs_sum;
 }
 
-extern void account_user_time(struct task_struct *, cputime_t);
-extern void account_system_time(struct task_struct *, int, cputime_t);
-extern void account_steal_time(cputime_t);
-extern void account_idle_time(cputime_t);
+extern void account_user_time(struct task_struct *, u64);
+extern void account_guest_time(struct task_struct *, u64);
+extern void account_system_time(struct task_struct *, int, u64);
+extern void account_system_index_time(struct task_struct *, u64,
+                                     enum cpu_usage_stat);
+extern void account_steal_time(u64);
+extern void account_idle_time(u64);
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 static inline void account_process_tick(struct task_struct *tsk, int user)
 {
-       vtime_account_user(tsk);
+       vtime_flush(tsk);
 }
 #else
 extern void account_process_tick(struct task_struct *, int user);
index 8f68490842481286446efc92a835ff38db1f1aac..16ddfb8b304a330ab2e2594cab78bc481930f32c 100644 (file)
@@ -278,9 +278,13 @@ struct kprobe_insn_cache {
        int nr_garbage;
 };
 
+#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
 extern kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c);
 extern void __free_insn_slot(struct kprobe_insn_cache *c,
                             kprobe_opcode_t *slot, int dirty);
+/* sleep-less address checking routine  */
+extern bool __is_insn_slot_addr(struct kprobe_insn_cache *c,
+                               unsigned long addr);
 
 #define DEFINE_INSN_CACHE_OPS(__name)                                  \
 extern struct kprobe_insn_cache kprobe_##__name##_slots;               \
@@ -294,6 +298,18 @@ static inline void free_##__name##_slot(kprobe_opcode_t *slot, int dirty)\
 {                                                                      \
        __free_insn_slot(&kprobe_##__name##_slots, slot, dirty);        \
 }                                                                      \
+                                                                       \
+static inline bool is_kprobe_##__name##_slot(unsigned long addr)       \
+{                                                                      \
+       return __is_insn_slot_addr(&kprobe_##__name##_slots, addr);     \
+}
+#else /* __ARCH_WANT_KPROBES_INSN_SLOT */
+#define DEFINE_INSN_CACHE_OPS(__name)                                  \
+static inline bool is_kprobe_##__name##_slot(unsigned long addr)       \
+{                                                                      \
+       return 0;                                                       \
+}
+#endif
 
 DEFINE_INSN_CACHE_OPS(insn);
 
@@ -330,7 +346,6 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table,
                                             int write, void __user *buffer,
                                             size_t *length, loff_t *ppos);
 #endif
-
 #endif /* CONFIG_OPTPROBES */
 #ifdef CONFIG_KPROBES_ON_FTRACE
 extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
@@ -481,6 +496,19 @@ static inline int enable_jprobe(struct jprobe *jp)
        return enable_kprobe(&jp->kp);
 }
 
+#ifndef CONFIG_KPROBES
+static inline bool is_kprobe_insn_slot(unsigned long addr)
+{
+       return false;
+}
+#endif
+#ifndef CONFIG_OPTPROBES
+static inline bool is_kprobe_optinsn_slot(unsigned long addr)
+{
+       return false;
+}
+#endif
+
 #ifdef CONFIG_KPROBES
 /*
  * Blacklist ganerating macro. Specify functions which is not probed
index e15828fd71f1b589780b933549e3b07c9652c1e9..f4156f88f5575dceb23a78807a12e5fde037c701 100644 (file)
 #ifndef _KREF_H_
 #define _KREF_H_
 
-#include <linux/bug.h>
-#include <linux/atomic.h>
-#include <linux/kernel.h>
-#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/refcount.h>
 
 struct kref {
-       atomic_t refcount;
+       refcount_t refcount;
 };
 
+#define KREF_INIT(n)   { .refcount = REFCOUNT_INIT(n), }
+
 /**
  * kref_init - initialize object.
  * @kref: object in question.
  */
 static inline void kref_init(struct kref *kref)
 {
-       atomic_set(&kref->refcount, 1);
+       refcount_set(&kref->refcount, 1);
+}
+
+static inline unsigned int kref_read(const struct kref *kref)
+{
+       return refcount_read(&kref->refcount);
 }
 
 /**
@@ -39,17 +44,12 @@ static inline void kref_init(struct kref *kref)
  */
 static inline void kref_get(struct kref *kref)
 {
-       /* If refcount was 0 before incrementing then we have a race
-        * condition when this kref is freeing by some other thread right now.
-        * In this case one should use kref_get_unless_zero()
-        */
-       WARN_ON_ONCE(atomic_inc_return(&kref->refcount) < 2);
+       refcount_inc(&kref->refcount);
 }
 
 /**
- * kref_sub - subtract a number of refcounts for object.
+ * kref_put - decrement refcount for object.
  * @kref: object.
- * @count: Number of recounts to subtract.
  * @release: pointer to the function that will clean up the object when the
  *          last reference to the object is released.
  *          This pointer is required, and it is not acceptable to pass kfree
@@ -58,57 +58,43 @@ static inline void kref_get(struct kref *kref)
  *          maintainer, and anyone else who happens to notice it.  You have
  *          been warned.
  *
- * Subtract @count from the refcount, and if 0, call release().
+ * Decrement the refcount, and if 0, call release().
  * Return 1 if the object was removed, otherwise return 0.  Beware, if this
  * function returns 0, you still can not count on the kref from remaining in
  * memory.  Only use the return value if you want to see if the kref is now
  * gone, not present.
  */
-static inline int kref_sub(struct kref *kref, unsigned int count,
-            void (*release)(struct kref *kref))
+static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
 {
        WARN_ON(release == NULL);
 
-       if (atomic_sub_and_test((int) count, &kref->refcount)) {
+       if (refcount_dec_and_test(&kref->refcount)) {
                release(kref);
                return 1;
        }
        return 0;
 }
 
-/**
- * kref_put - decrement refcount for object.
- * @kref: object.
- * @release: pointer to the function that will clean up the object when the
- *          last reference to the object is released.
- *          This pointer is required, and it is not acceptable to pass kfree
- *          in as this function.  If the caller does pass kfree to this
- *          function, you will be publicly mocked mercilessly by the kref
- *          maintainer, and anyone else who happens to notice it.  You have
- *          been warned.
- *
- * Decrement the refcount, and if 0, call release().
- * Return 1 if the object was removed, otherwise return 0.  Beware, if this
- * function returns 0, you still can not count on the kref from remaining in
- * memory.  Only use the return value if you want to see if the kref is now
- * gone, not present.
- */
-static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
-{
-       return kref_sub(kref, 1, release);
-}
-
 static inline int kref_put_mutex(struct kref *kref,
                                 void (*release)(struct kref *kref),
                                 struct mutex *lock)
 {
        WARN_ON(release == NULL);
-       if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
-               mutex_lock(lock);
-               if (unlikely(!atomic_dec_and_test(&kref->refcount))) {
-                       mutex_unlock(lock);
-                       return 0;
-               }
+
+       if (refcount_dec_and_mutex_lock(&kref->refcount, lock)) {
+               release(kref);
+               return 1;
+       }
+       return 0;
+}
+
+static inline int kref_put_lock(struct kref *kref,
+                               void (*release)(struct kref *kref),
+                               spinlock_t *lock)
+{
+       WARN_ON(release == NULL);
+
+       if (refcount_dec_and_lock(&kref->refcount, lock)) {
                release(kref);
                return 1;
        }
@@ -133,6 +119,6 @@ static inline int kref_put_mutex(struct kref *kref,
  */
 static inline int __must_check kref_get_unless_zero(struct kref *kref)
 {
-       return atomic_add_unless(&kref->refcount, 1, 0);
+       return refcount_inc_not_zero(&kref->refcount);
 }
 #endif /* _KREF_H_ */
index 569cb531094c20a9aa2db478aaa6f348d2afd7f4..38c0bd7ca1074af234d516275791d05f945ce1f0 100644 (file)
@@ -13,6 +13,7 @@
 #define __LINUX_LEDS_H_INCLUDED
 
 #include <linux/device.h>
+#include <linux/kernfs.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/rwsem.h>
@@ -27,6 +28,7 @@ struct device;
 
 enum led_brightness {
        LED_OFF         = 0,
+       LED_ON          = 1,
        LED_HALF        = 127,
        LED_FULL        = 255,
 };
@@ -46,6 +48,7 @@ struct led_classdev {
 #define LED_DEV_CAP_FLASH      (1 << 18)
 #define LED_HW_PLUGGABLE       (1 << 19)
 #define LED_PANIC_INDICATOR    (1 << 20)
+#define LED_BRIGHT_HW_CHANGED  (1 << 21)
 
        /* set_brightness_work / blink_timer flags, atomic, private. */
        unsigned long           work_flags;
@@ -110,6 +113,11 @@ struct led_classdev {
        bool                    activated;
 #endif
 
+#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
+       int                      brightness_hw_changed;
+       struct kernfs_node      *brightness_hw_changed_kn;
+#endif
+
        /* Ensures consistent access to the LED Flash Class device */
        struct mutex            led_access;
 };
@@ -422,4 +430,12 @@ static inline void ledtrig_cpu(enum cpu_led_event evt)
 }
 #endif
 
+#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
+extern void led_classdev_notify_brightness_hw_changed(
+       struct led_classdev *led_cdev, enum led_brightness brightness);
+#else
+static inline void led_classdev_notify_brightness_hw_changed(
+       struct led_classdev *led_cdev, enum led_brightness brightness) { }
+#endif
+
 #endif         /* __LINUX_LEDS_H_INCLUDED */
index fd4ca0b4fe0f72532767fca40f0ce2d60cf9d3ae..171baa90f6f69c6b55b15772d395f6a168bec4c7 100644 (file)
@@ -3,28 +3,33 @@
 /*
  * Lock-less NULL terminated single linked list
  *
- * If there are multiple producers and multiple consumers, llist_add
- * can be used in producers and llist_del_all can be used in
- * consumers.  They can work simultaneously without lock.  But
- * llist_del_first can not be used here.  Because llist_del_first
- * depends on list->first->next does not changed if list->first is not
- * changed during its operation, but llist_del_first, llist_add,
- * llist_add (or llist_del_all, llist_add, llist_add) sequence in
- * another consumer may violate that.
- *
- * If there are multiple producers and one consumer, llist_add can be
- * used in producers and llist_del_all or llist_del_first can be used
- * in the consumer.
- *
- * This can be summarized as follow:
+ * Cases where locking is not needed:
+ * If there are multiple producers and multiple consumers, llist_add can be
+ * used in producers and llist_del_all can be used in consumers simultaneously
+ * without locking. Also a single consumer can use llist_del_first while
+ * multiple producers simultaneously use llist_add, without any locking.
+ *
+ * Cases where locking is needed:
+ * If we have multiple consumers with llist_del_first used in one consumer, and
+ * llist_del_first or llist_del_all used in other consumers, then a lock is
+ * needed.  This is because llist_del_first depends on list->first->next not
+ * changing, but without lock protection, there's no way to be sure about that
+ * if a preemption happens in the middle of the delete operation and on being
+ * preempted back, the list->first is the same as before causing the cmpxchg in
+ * llist_del_first to succeed. For example, while a llist_del_first operation
+ * is in progress in one consumer, then a llist_del_first, llist_add,
+ * llist_add (or llist_del_all, llist_add, llist_add) sequence in another
+ * consumer may cause violations.
+ *
+ * This can be summarized as follows:
  *
  *           |   add    | del_first |  del_all
  * add       |    -     |     -     |     -
  * del_first |          |     L     |     L
  * del_all   |          |           |     -
  *
- * Where "-" stands for no lock is needed, while "L" stands for lock
- * is needed.
+ * Where, a particular row's operation can happen concurrently with a column's
+ * operation, with "-" being no lock needed, while "L" being lock is needed.
  *
  * The list entries deleted via llist_del_all can be traversed with
  * traversing function such as llist_for_each etc.  But the list
index fd7ff3d91e6a920ff084beca09d10b5b9abba981..ef3d4f67118ce0f60789e6e749a4773754e01e87 100644 (file)
@@ -203,6 +203,17 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
  *  ... and so on.
  */
 
-#define order_base_2(n) ilog2(roundup_pow_of_two(n))
+static inline __attribute_const__
+int __order_base_2(unsigned long n)
+{
+       return n > 1 ? ilog2(n - 1) + 1 : 0;
+}
 
+#define order_base_2(n)                                \
+(                                              \
+       __builtin_constant_p(n) ? (             \
+               ((n) == 0 || (n) == 1) ? 0 :    \
+               ilog2((n) - 1) + 1) :           \
+       __order_base_2(n)                       \
+)
 #endif /* _LINUX_LOG2_H */
index 6e8b5b270ffeada870b3cdcd638530f2479b84ee..80690c96c734d4fea4cd8970fe6a93b89eb34379 100644 (file)
@@ -133,6 +133,16 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
        return ret;
 }
 
+#ifndef mul_u32_u32
+/*
+ * Many a GCC version messes this up and generates a 64x64 mult :-(
+ */
+static inline u64 mul_u32_u32(u32 a, u32 b)
+{
+       return (u64)a * b;
+}
+#endif
+
 #if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
 
 #ifndef mul_u64_u32_shr
@@ -160,9 +170,9 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
        al = a;
        ah = a >> 32;
 
-       ret = ((u64)al * mul) >> shift;
+       ret = mul_u32_u32(al, mul) >> shift;
        if (ah)
-               ret += ((u64)ah * mul) << (32 - shift);
+               ret += mul_u32_u32(ah, mul) << (32 - shift);
 
        return ret;
 }
@@ -186,10 +196,10 @@ static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
        a0.ll = a;
        b0.ll = b;
 
-       rl.ll = (u64)a0.l.low * b0.l.low;
-       rm.ll = (u64)a0.l.low * b0.l.high;
-       rn.ll = (u64)a0.l.high * b0.l.low;
-       rh.ll = (u64)a0.l.high * b0.l.high;
+       rl.ll = mul_u32_u32(a0.l.low, b0.l.low);
+       rm.ll = mul_u32_u32(a0.l.low, b0.l.high);
+       rn.ll = mul_u32_u32(a0.l.high, b0.l.low);
+       rh.ll = mul_u32_u32(a0.l.high, b0.l.high);
 
        /*
         * Each of these lines computes a 64-bit intermediate result into "c",
@@ -229,8 +239,8 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
        } u, rl, rh;
 
        u.ll = a;
-       rl.ll = (u64)u.l.low * mul;
-       rh.ll = (u64)u.l.high * mul + rl.l.high;
+       rl.ll = mul_u32_u32(u.l.low, mul);
+       rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high;
 
        /* Bits 32-63 of the result will be in rh.l.low. */
        rl.l.high = do_div(rh.ll, divisor);
index 01033fadea4766d5e6efddc78ca595d68c021464..134a2f69c21abf7921181af0adff033bb459edc5 100644 (file)
@@ -85,7 +85,8 @@ extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
 extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
 /* VM interface that may be used by firmware interface */
 extern int online_pages(unsigned long, unsigned long, int);
-extern int test_pages_in_a_zone(unsigned long, unsigned long);
+extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
+       unsigned long *valid_start, unsigned long *valid_end);
 extern void __offline_isolated_pages(unsigned long, unsigned long);
 
 typedef void (*online_page_callback_t)(struct page *page);
@@ -284,7 +285,7 @@ extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
                unsigned long map_offset);
 extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
                                          unsigned long pnum);
-extern int zone_can_shift(unsigned long pfn, unsigned long nr_pages,
-                         enum zone_type target);
+extern bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
+                         enum zone_type target, int *zone_shift);
 
 #endif /* __LINUX_MEMORY_HOTPLUG_H */
index a4860bc9b73d4ffe927d087c24b70970084ff330..f848ee86a339bf37d662459b1443edbd8580e968 100644 (file)
@@ -13,7 +13,7 @@
 
 #include <linux/regmap.h>
 
-enum {
+enum axp20x_variants {
        AXP152_ID = 0,
        AXP202_ID,
        AXP209_ID,
@@ -532,35 +532,6 @@ struct axp20x_dev {
        const struct regmap_irq_chip    *regmap_irq_chip;
 };
 
-#define BATTID_LEN                             64
-#define OCV_CURVE_SIZE                 32
-#define MAX_THERM_CURVE_SIZE   25
-#define PD_DEF_MIN_TEMP                        0
-#define PD_DEF_MAX_TEMP                        55
-
-struct axp20x_fg_pdata {
-       char battid[BATTID_LEN + 1];
-       int design_cap;
-       int min_volt;
-       int max_volt;
-       int max_temp;
-       int min_temp;
-       int cap1;
-       int cap0;
-       int rdc1;
-       int rdc0;
-       int ocv_curve[OCV_CURVE_SIZE];
-       int tcsz;
-       int thermistor_curve[MAX_THERM_CURVE_SIZE][2];
-};
-
-struct axp20x_chrg_pdata {
-       int max_cc;
-       int max_cv;
-       int def_cc;
-       int def_cv;
-};
-
 struct axp288_extcon_pdata {
        /* GPIO pin control to switch D+/D- lines b/w PMIC and SOC */
        struct gpio_desc *gpio_mux_cntl;
index 2b300b44f99440ad4e0be7c57bb7f07cc5d811d1..fba8fcb54f8ceb4f0020b5fdedcd86b4666f864d 100644 (file)
@@ -20,6 +20,8 @@
 #ifndef LPC_ICH_H
 #define LPC_ICH_H
 
+#include <linux/platform_data/intel-spi.h>
+
 /* GPIO resources */
 #define ICH_RES_GPIO   0
 #define ICH_RES_GPE0   1
@@ -40,6 +42,7 @@ struct lpc_ich_info {
        char name[32];
        unsigned int iTCO_version;
        unsigned int gpio_version;
+       enum intel_spi_type spi_type;
        u8 use_gpio;
 };
 
index 257173e0095ebdb22e1684fbd56ce05a8971ee1d..f541da68d1e7c50f1bb543c05e57a73001c86d28 100644 (file)
@@ -35,6 +35,8 @@
 #define PHY_ID_KSZ886X         0x00221430
 #define PHY_ID_KSZ8863         0x00221435
 
+#define PHY_ID_KSZ8795         0x00221550
+
 /* struct phy_device dev_flags definitions */
 #define MICREL_PHY_50MHZ_CLK   0x00000001
 #define MICREL_PHY_FXEN                0x00000002
index 36d9896fbc1eb0d12e60682f15e96648c13ebf98..f4aac87adcc3555014f6215d6599b604c70388e6 100644 (file)
@@ -972,12 +972,16 @@ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
  * @zonelist - The zonelist to search for a suitable zone
  * @highest_zoneidx - The zone index of the highest zone to return
  * @nodes - An optional nodemask to filter the zonelist with
- * @zone - The first suitable zone found is returned via this parameter
+ * @return - Zoneref pointer for the first suitable zone found (see below)
  *
  * This function returns the first zone at or below a given zone index that is
  * within the allowed nodemask. The zoneref returned is a cursor that can be
  * used to iterate the zonelist with next_zones_zonelist by advancing it by
  * one before calling.
+ *
+ * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
+ * never NULL). This may happen either genuinely, or due to concurrent nodemask
+ * update due to cpuset modification.
  */
 static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
                                        enum zone_type highest_zoneidx,
index 7c84273d60b963d44c032761cac62194e82d1198..cc7cba219b207de5536f6f9e9353d1a20201e4af 100644 (file)
@@ -346,7 +346,7 @@ struct module {
 
        /* Exported symbols */
        const struct kernel_symbol *syms;
-       const unsigned long *crcs;
+       const s32 *crcs;
        unsigned int num_syms;
 
        /* Kernel parameters. */
@@ -359,18 +359,18 @@ struct module {
        /* GPL-only exported symbols. */
        unsigned int num_gpl_syms;
        const struct kernel_symbol *gpl_syms;
-       const unsigned long *gpl_crcs;
+       const s32 *gpl_crcs;
 
 #ifdef CONFIG_UNUSED_SYMBOLS
        /* unused exported symbols. */
        const struct kernel_symbol *unused_syms;
-       const unsigned long *unused_crcs;
+       const s32 *unused_crcs;
        unsigned int num_unused_syms;
 
        /* GPL-only, unused exported symbols. */
        unsigned int num_unused_gpl_syms;
        const struct kernel_symbol *unused_gpl_syms;
-       const unsigned long *unused_gpl_crcs;
+       const s32 *unused_gpl_crcs;
 #endif
 
 #ifdef CONFIG_MODULE_SIG
@@ -382,7 +382,7 @@ struct module {
 
        /* symbols that will be GPL-only in the near future. */
        const struct kernel_symbol *gpl_future_syms;
-       const unsigned long *gpl_future_crcs;
+       const s32 *gpl_future_crcs;
        unsigned int num_gpl_future_syms;
 
        /* Exception table */
@@ -523,7 +523,7 @@ struct module *find_module(const char *name);
 
 struct symsearch {
        const struct kernel_symbol *start, *stop;
-       const unsigned long *crcs;
+       const s32 *crcs;
        enum {
                NOT_GPL_ONLY,
                GPL_ONLY,
@@ -539,7 +539,7 @@ struct symsearch {
  */
 const struct kernel_symbol *find_symbol(const char *name,
                                        struct module **owner,
-                                       const unsigned long **crc,
+                                       const s32 **crc,
                                        bool gplok,
                                        bool warn);
 
index 0db320b7bb15d94a07efd185f7266fe6e5a74a7e..a83b84ff70e52c88d131593e2ff90451da63cfdd 100644 (file)
@@ -17,7 +17,13 @@ struct msi_desc;
 struct pci_dev;
 struct platform_msi_priv_data;
 void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
+#ifdef CONFIG_GENERIC_MSI_IRQ
 void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
+#else
+static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
+{
+}
+#endif
 
 typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
                                    struct msi_msg *msg);
@@ -116,11 +122,15 @@ struct msi_desc {
 
 struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
 void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
+void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
 #else /* CONFIG_PCI_MSI */
 static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
 {
        return NULL;
 }
+static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
+{
+}
 #endif /* CONFIG_PCI_MSI */
 
 struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
@@ -128,7 +138,6 @@ struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
 void free_msi_entry(struct msi_desc *entry);
 void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
 void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
-void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
 
 u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
 u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
diff --git a/include/linux/mtd/fsmc.h b/include/linux/mtd/fsmc.h
deleted file mode 100644 (file)
index ad3c348..0000000
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * incude/mtd/fsmc.h
- *
- * ST Microelectronics
- * Flexible Static Memory Controller (FSMC)
- * platform data interface and header file
- *
- * Copyright Â© 2010 ST Microelectronics
- * Vipin Kumar <vipin.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __MTD_FSMC_H
-#define __MTD_FSMC_H
-
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/physmap.h>
-#include <linux/types.h>
-#include <linux/mtd/partitions.h>
-#include <asm/param.h>
-
-#define FSMC_NAND_BW8          1
-#define FSMC_NAND_BW16         2
-
-#define FSMC_MAX_NOR_BANKS     4
-#define FSMC_MAX_NAND_BANKS    4
-
-#define FSMC_FLASH_WIDTH8      1
-#define FSMC_FLASH_WIDTH16     2
-
-/* fsmc controller registers for NOR flash */
-#define CTRL                   0x0
-       /* ctrl register definitions */
-       #define BANK_ENABLE             (1 << 0)
-       #define MUXED                   (1 << 1)
-       #define NOR_DEV                 (2 << 2)
-       #define WIDTH_8                 (0 << 4)
-       #define WIDTH_16                (1 << 4)
-       #define RSTPWRDWN               (1 << 6)
-       #define WPROT                   (1 << 7)
-       #define WRT_ENABLE              (1 << 12)
-       #define WAIT_ENB                (1 << 13)
-
-#define CTRL_TIM               0x4
-       /* ctrl_tim register definitions */
-
-#define FSMC_NOR_BANK_SZ       0x8
-#define FSMC_NOR_REG_SIZE      0x40
-
-#define FSMC_NOR_REG(base, bank, reg)          (base + \
-                                               FSMC_NOR_BANK_SZ * (bank) + \
-                                               reg)
-
-/* fsmc controller registers for NAND flash */
-#define PC                     0x00
-       /* pc register definitions */
-       #define FSMC_RESET              (1 << 0)
-       #define FSMC_WAITON             (1 << 1)
-       #define FSMC_ENABLE             (1 << 2)
-       #define FSMC_DEVTYPE_NAND       (1 << 3)
-       #define FSMC_DEVWID_8           (0 << 4)
-       #define FSMC_DEVWID_16          (1 << 4)
-       #define FSMC_ECCEN              (1 << 6)
-       #define FSMC_ECCPLEN_512        (0 << 7)
-       #define FSMC_ECCPLEN_256        (1 << 7)
-       #define FSMC_TCLR_1             (1)
-       #define FSMC_TCLR_SHIFT         (9)
-       #define FSMC_TCLR_MASK          (0xF)
-       #define FSMC_TAR_1              (1)
-       #define FSMC_TAR_SHIFT          (13)
-       #define FSMC_TAR_MASK           (0xF)
-#define STS                    0x04
-       /* sts register definitions */
-       #define FSMC_CODE_RDY           (1 << 15)
-#define COMM                   0x08
-       /* comm register definitions */
-       #define FSMC_TSET_0             0
-       #define FSMC_TSET_SHIFT         0
-       #define FSMC_TSET_MASK          0xFF
-       #define FSMC_TWAIT_6            6
-       #define FSMC_TWAIT_SHIFT        8
-       #define FSMC_TWAIT_MASK         0xFF
-       #define FSMC_THOLD_4            4
-       #define FSMC_THOLD_SHIFT        16
-       #define FSMC_THOLD_MASK         0xFF
-       #define FSMC_THIZ_1             1
-       #define FSMC_THIZ_SHIFT         24
-       #define FSMC_THIZ_MASK          0xFF
-#define ATTRIB                 0x0C
-#define IOATA                  0x10
-#define ECC1                   0x14
-#define ECC2                   0x18
-#define ECC3                   0x1C
-#define FSMC_NAND_BANK_SZ      0x20
-
-#define FSMC_NAND_REG(base, bank, reg)         (base + FSMC_NOR_REG_SIZE + \
-                                               (FSMC_NAND_BANK_SZ * (bank)) + \
-                                               reg)
-
-#define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ)
-
-struct fsmc_nand_timings {
-       uint8_t tclr;
-       uint8_t tar;
-       uint8_t thiz;
-       uint8_t thold;
-       uint8_t twait;
-       uint8_t tset;
-};
-
-enum access_mode {
-       USE_DMA_ACCESS = 1,
-       USE_WORD_ACCESS,
-};
-
-/**
- * fsmc_nand_platform_data - platform specific NAND controller config
- * @nand_timings: timing setup for the physical NAND interface
- * @partitions: partition table for the platform, use a default fallback
- * if this is NULL
- * @nr_partitions: the number of partitions in the previous entry
- * @options: different options for the driver
- * @width: bus width
- * @bank: default bank
- * @select_bank: callback to select a certain bank, this is
- * platform-specific. If the controller only supports one bank
- * this may be set to NULL
- */
-struct fsmc_nand_platform_data {
-       struct fsmc_nand_timings *nand_timings;
-       struct mtd_partition    *partitions;
-       unsigned int            nr_partitions;
-       unsigned int            options;
-       unsigned int            width;
-       unsigned int            bank;
-
-       enum access_mode        mode;
-
-       void                    (*select_bank)(uint32_t bank, uint32_t busw);
-
-       /* priv structures for dma accesses */
-       void                    *read_dma_priv;
-       void                    *write_dma_priv;
-};
-
-extern int __init fsmc_nor_init(struct platform_device *pdev,
-               unsigned long base, uint32_t bank, uint32_t width);
-extern void __init fsmc_init_board_info(struct platform_device *pdev,
-               struct mtd_partition *partitions, unsigned int nr_partitions,
-               unsigned int width);
-
-#endif /* __MTD_FSMC_H */
index 13f8052b9ff929a6a1809fbadd5a1424fa8304e2..eebdc63cf6af94a5ea9a5c4703c416f87e0079a8 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/uio.h>
 #include <linux/notifier.h>
 #include <linux/device.h>
+#include <linux/of.h>
 
 #include <mtd/mtd-abi.h>
 
@@ -322,6 +323,7 @@ struct mtd_info {
        int (*_block_isreserved) (struct mtd_info *mtd, loff_t ofs);
        int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
        int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
+       int (*_max_bad_blocks) (struct mtd_info *mtd, loff_t ofs, size_t len);
        int (*_suspend) (struct mtd_info *mtd);
        void (*_resume) (struct mtd_info *mtd);
        void (*_reboot) (struct mtd_info *mtd);
@@ -385,6 +387,8 @@ static inline void mtd_set_of_node(struct mtd_info *mtd,
                                   struct device_node *np)
 {
        mtd->dev.of_node = np;
+       if (!mtd->name)
+               of_property_read_string(np, "label", &mtd->name);
 }
 
 static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd)
@@ -397,6 +401,18 @@ static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
        return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
 }
 
+static inline int mtd_max_bad_blocks(struct mtd_info *mtd,
+                                    loff_t ofs, size_t len)
+{
+       if (!mtd->_max_bad_blocks)
+               return -ENOTSUPP;
+
+       if (mtd->size < (len + ofs) || ofs < 0)
+               return -EINVAL;
+
+       return mtd->_max_bad_blocks(mtd, ofs, len);
+}
+
 int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
                              struct mtd_pairing_info *info);
 int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
index c5f3a012ae62fec6766cf55a85c7f61ddecee3f7..9591e0fbe5bd76a1c47e2deb4292469e0a2cf8e6 100644 (file)
@@ -615,7 +615,7 @@ struct nand_buffers {
  * @tALS_min: ALE setup time
  * @tAR_min: ALE to RE# delay
  * @tCEA_max: CE# access time
- * @tCEH_min:
+ * @tCEH_min: CE# high hold time
  * @tCH_min:  CE# hold time
  * @tCHZ_max: CE# high to output hi-Z
  * @tCLH_min: CLE hold time
@@ -801,6 +801,10 @@ nand_get_sdr_timings(const struct nand_data_interface *conf)
  *                     supported, 0 otherwise.
  * @jedec_params:      [INTERN] holds the JEDEC parameter page when JEDEC is
  *                     supported, 0 otherwise.
+ * @max_bb_per_die:    [INTERN] the max number of bad blocks each die of a
+ *                     this nand device will encounter their life times.
+ * @blocks_per_die:    [INTERN] The number of PEBs in a die
+ * @data_interface:    [INTERN] NAND interface timing information
  * @read_retries:      [INTERN] the number of read retry modes supported
  * @onfi_set_features: [REPLACEABLE] set the features for ONFI nand
  * @onfi_get_features: [REPLACEABLE] get the features for ONFI nand
@@ -883,6 +887,8 @@ struct nand_chip {
                struct nand_onfi_params onfi_params;
                struct nand_jedec_params jedec_params;
        };
+       u16 max_bb_per_die;
+       u32 blocks_per_die;
 
        struct nand_data_interface *data_interface;
 
@@ -958,6 +964,7 @@ static inline void nand_set_controller_data(struct nand_chip *chip, void *priv)
 #define NAND_MFR_SANDISK       0x45
 #define NAND_MFR_INTEL         0x89
 #define NAND_MFR_ATO           0x9b
+#define NAND_MFR_WINBOND       0xef
 
 /* The maximum expected count of bytes in the NAND ID sequence */
 #define NAND_MAX_ID_LEN 8
index 70736e1e6c8f70c5f8f3e53656a282332b33b539..06df1e06b6e03bb953f950ad55e9e516ed5a9dc5 100644 (file)
@@ -41,6 +41,7 @@ struct mtd_partition {
        uint64_t size;                  /* partition size */
        uint64_t offset;                /* offset within the master MTD space */
        uint32_t mask_flags;            /* master MTD flags to mask out for this partition */
+       struct device_node *of_node;
 };
 
 #define MTDPART_OFS_RETAIN     (-3)
index c425c7b4c2a09af2cbb38ea25b780463dbe3230f..f2a718030476f734c6f8c4a620b7ea3f1770dc28 100644 (file)
 #define SPINOR_OP_WRSR         0x01    /* Write status register 1 byte */
 #define SPINOR_OP_READ         0x03    /* Read data bytes (low frequency) */
 #define SPINOR_OP_READ_FAST    0x0b    /* Read data bytes (high frequency) */
-#define SPINOR_OP_READ_1_1_2   0x3b    /* Read data bytes (Dual SPI) */
-#define SPINOR_OP_READ_1_1_4   0x6b    /* Read data bytes (Quad SPI) */
+#define SPINOR_OP_READ_1_1_2   0x3b    /* Read data bytes (Dual Output SPI) */
+#define SPINOR_OP_READ_1_2_2   0xbb    /* Read data bytes (Dual I/O SPI) */
+#define SPINOR_OP_READ_1_1_4   0x6b    /* Read data bytes (Quad Output SPI) */
+#define SPINOR_OP_READ_1_4_4   0xeb    /* Read data bytes (Quad I/O SPI) */
 #define SPINOR_OP_PP           0x02    /* Page program (up to 256 bytes) */
+#define SPINOR_OP_PP_1_1_4     0x32    /* Quad page program */
+#define SPINOR_OP_PP_1_4_4     0x38    /* Quad page program */
 #define SPINOR_OP_BE_4K                0x20    /* Erase 4KiB block */
 #define SPINOR_OP_BE_4K_PMC    0xd7    /* Erase 4KiB block on PMC chips */
 #define SPINOR_OP_BE_32K       0x52    /* Erase 32KiB block */
 #define SPINOR_OP_RDFSR                0x70    /* Read flag status register */
 
 /* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
-#define SPINOR_OP_READ4                0x13    /* Read data bytes (low frequency) */
-#define SPINOR_OP_READ4_FAST   0x0c    /* Read data bytes (high frequency) */
-#define SPINOR_OP_READ4_1_1_2  0x3c    /* Read data bytes (Dual SPI) */
-#define SPINOR_OP_READ4_1_1_4  0x6c    /* Read data bytes (Quad SPI) */
+#define SPINOR_OP_READ_4B      0x13    /* Read data bytes (low frequency) */
+#define SPINOR_OP_READ_FAST_4B 0x0c    /* Read data bytes (high frequency) */
+#define SPINOR_OP_READ_1_1_2_4B        0x3c    /* Read data bytes (Dual Output SPI) */
+#define SPINOR_OP_READ_1_2_2_4B        0xbc    /* Read data bytes (Dual I/O SPI) */
+#define SPINOR_OP_READ_1_1_4_4B        0x6c    /* Read data bytes (Quad Output SPI) */
+#define SPINOR_OP_READ_1_4_4_4B        0xec    /* Read data bytes (Quad I/O SPI) */
 #define SPINOR_OP_PP_4B                0x12    /* Page program (up to 256 bytes) */
+#define SPINOR_OP_PP_1_1_4_4B  0x34    /* Quad page program */
+#define SPINOR_OP_PP_1_4_4_4B  0x3e    /* Quad page program */
+#define SPINOR_OP_BE_4K_4B     0x21    /* Erase 4KiB block */
+#define SPINOR_OP_BE_32K_4B    0x5c    /* Erase 32KiB block */
 #define SPINOR_OP_SE_4B                0xdc    /* Sector erase (usually 64KiB) */
 
 /* Used for SST flashes only. */
 #define SPINOR_OP_WRDI         0x04    /* Write disable */
 #define SPINOR_OP_AAI_WP       0xad    /* Auto address increment word program */
 
+/* Used for S3AN flashes only */
+#define SPINOR_OP_XSE          0x50    /* Sector erase */
+#define SPINOR_OP_XPP          0x82    /* Page program */
+#define SPINOR_OP_XRDSR                0xd7    /* Read status register */
+
+#define XSR_PAGESIZE           BIT(0)  /* Page size in Po2 or Linear */
+#define XSR_RDY                        BIT(7)  /* Ready */
+
+
 /* Used for Macronix and Winbond flashes. */
 #define SPINOR_OP_EN4B         0xb7    /* Enter 4-byte mode */
 #define SPINOR_OP_EX4B         0xe9    /* Exit 4-byte mode */
@@ -119,6 +138,9 @@ enum spi_nor_ops {
 enum spi_nor_option_flags {
        SNOR_F_USE_FSR          = BIT(0),
        SNOR_F_HAS_SR_TB        = BIT(1),
+       SNOR_F_NO_OP_CHIP_ERASE = BIT(2),
+       SNOR_F_S3AN_ADDR_DEFAULT = BIT(3),
+       SNOR_F_READY_XSR_RDY    = BIT(4),
 };
 
 /**
index b97870f2debd063ba358a4d2343955f39dd6b994..1127fe31645dddc1e8a59a13d78b74bf8770f60c 100644 (file)
@@ -20,6 +20,8 @@
 #include <linux/osq_lock.h>
 #include <linux/debug_locks.h>
 
+struct ww_acquire_ctx;
+
 /*
  * Simple, straightforward mutexes with strict semantics:
  *
@@ -65,7 +67,7 @@ struct mutex {
 
 static inline struct task_struct *__mutex_owner(struct mutex *lock)
 {
-       return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x03);
+       return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x07);
 }
 
 /*
@@ -75,6 +77,7 @@ static inline struct task_struct *__mutex_owner(struct mutex *lock)
 struct mutex_waiter {
        struct list_head        list;
        struct task_struct      *task;
+       struct ww_acquire_ctx   *ww_ctx;
 #ifdef CONFIG_DEBUG_MUTEXES
        void                    *magic;
 #endif
@@ -156,10 +159,12 @@ extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
                                        unsigned int subclass);
 extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
                                        unsigned int subclass);
+extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass);
 
 #define mutex_lock(lock) mutex_lock_nested(lock, 0)
 #define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
 #define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
+#define mutex_lock_io(lock) mutex_lock_io_nested(lock, 0)
 
 #define mutex_lock_nest_lock(lock, nest_lock)                          \
 do {                                                                   \
@@ -171,11 +176,13 @@ do {                                                                      \
 extern void mutex_lock(struct mutex *lock);
 extern int __must_check mutex_lock_interruptible(struct mutex *lock);
 extern int __must_check mutex_lock_killable(struct mutex *lock);
+extern void mutex_lock_io(struct mutex *lock);
 
 # define mutex_lock_nested(lock, subclass) mutex_lock(lock)
 # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
 # define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
 # define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
+# define mutex_lock_io_nested(lock, subclass) mutex_lock(lock)
 #endif
 
 /*
index 9bde9558b59672a866bd763039d326bde2af0f81..27914672602d9d573e6a3da271cec33ccef51b16 100644 (file)
@@ -866,11 +866,15 @@ struct netdev_xdp {
  *     of useless work if you return NETDEV_TX_BUSY.
  *     Required; cannot be NULL.
  *
- * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
- *             netdev_features_t features);
- *     Adjusts the requested feature flags according to device-specific
- *     constraints, and returns the resulting flags. Must not modify
- *     the device state.
+ * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
+ *                                        struct net_device *dev
+ *                                        netdev_features_t features);
+ *     Called by core transmit path to determine if device is capable of
+ *     performing offload operations on a given packet. This is to give
+ *     the device an opportunity to implement any restrictions that cannot
+ *     be otherwise expressed by feature flags. The check is called with
+ *     the set of features that the stack has calculated and it returns
+ *     those the driver believes to be appropriate.
  *
  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
  *                         void *accel_priv, select_queue_fallback_t fallback);
@@ -1028,6 +1032,12 @@ struct netdev_xdp {
  *     Called to release previously enslaved netdev.
  *
  *      Feature/offload setting functions.
+ * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
+ *             netdev_features_t features);
+ *     Adjusts the requested feature flags according to device-specific
+ *     constraints, and returns the resulting flags. Must not modify
+ *     the device state.
+ *
  * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
  *     Called to update device configuration to new features. Passed
  *     feature set might be less than what was returned by ndo_fix_features()).
@@ -1100,15 +1110,6 @@ struct netdev_xdp {
  *     Callback to use for xmit over the accelerated station. This
  *     is used in place of ndo_start_xmit on accelerated net
  *     devices.
- * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
- *                                        struct net_device *dev
- *                                        netdev_features_t features);
- *     Called by core transmit path to determine if device is capable of
- *     performing offload operations on a given packet. This is to give
- *     the device an opportunity to implement any restrictions that cannot
- *     be otherwise expressed by feature flags. The check is called with
- *     the set of features that the stack has calculated and it returns
- *     those the driver believes to be appropriate.
  * int (*ndo_set_tx_maxrate)(struct net_device *dev,
  *                          int queue_index, u32 maxrate);
  *     Called when a user wants to set a max-rate limitation of specific
@@ -1510,6 +1511,7 @@ enum netdev_priv_flags {
  *     @max_mtu:       Interface Maximum MTU value
  *     @type:          Interface hardware type
  *     @hard_header_len: Maximum hardware header length.
+ *     @min_header_len:  Minimum hardware header length
  *
  *     @needed_headroom: Extra headroom the hardware may need, but not in all
  *                       cases can this be guaranteed
@@ -1727,6 +1729,7 @@ struct net_device {
        unsigned int            max_mtu;
        unsigned short          type;
        unsigned short          hard_header_len;
+       unsigned short          min_header_len;
 
        unsigned short          needed_headroom;
        unsigned short          needed_tailroom;
@@ -2693,6 +2696,8 @@ static inline bool dev_validate_header(const struct net_device *dev,
 {
        if (likely(len >= dev->hard_header_len))
                return true;
+       if (len < dev->min_header_len)
+               return false;
 
        if (capable(CAP_SYS_RAWIO)) {
                memset(ll_header + len, 0, dev->hard_header_len - len);
index bca536341d1ae51781981906f069a96f612cf52f..1b1ca04820a306ebe5da0aa7d12035c9c3d5e8d0 100644 (file)
@@ -282,7 +282,7 @@ enum nfsstat4 {
 
 static inline bool seqid_mutating_err(u32 err)
 {
-       /* rfc 3530 section 8.1.5: */
+       /* See RFC 7530, section 9.1.7 */
        switch (err) {
        case NFS4ERR_STALE_CLIENTID:
        case NFS4ERR_STALE_STATEID:
@@ -291,6 +291,7 @@ static inline bool seqid_mutating_err(u32 err)
        case NFS4ERR_BADXDR:
        case NFS4ERR_RESOURCE:
        case NFS4ERR_NOFILEHANDLE:
+       case NFS4ERR_MOVED:
                return false;
        };
        return true;
index aacca824a6aef4fcc4d2480aa9eeefd2fe82d6f9..0a3fadc32693a9cf869693f4c406eee5d168e36b 100644 (file)
@@ -110,6 +110,7 @@ extern int watchdog_user_enabled;
 extern int watchdog_thresh;
 extern unsigned long watchdog_enabled;
 extern unsigned long *watchdog_cpumask_bits;
+extern atomic_t watchdog_park_in_progress;
 #ifdef CONFIG_SMP
 extern int sysctl_softlockup_all_cpu_backtrace;
 extern int sysctl_hardlockup_all_cpu_backtrace;
index 6a7fc50510999eb330982c3d7b4452cb373063e2..13394ac83c66a70946d5a269ca8014c15477f942 100644 (file)
@@ -31,17 +31,6 @@ static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
 
 #endif /* CONFIG_OF_IOMMU */
 
-static inline void of_iommu_set_ops(struct device_node *np,
-                                   const struct iommu_ops *ops)
-{
-       iommu_register_instance(&np->fwnode, ops);
-}
-
-static inline const struct iommu_ops *of_iommu_get_ops(struct device_node *np)
-{
-       return iommu_get_instance(&np->fwnode);
-}
-
 extern struct of_device_id __iommu_of_table;
 
 typedef int (*of_iommu_init_fn)(struct device_node *);
index 1c7eec09e5eba7ae8c0cc8e82172791f992bb361..3a481a49546ef1c85d8f88bf7668f8a2c8e8c0f1 100644 (file)
@@ -204,7 +204,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
 static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 {
        unsigned long __percpu *percpu_count;
-       int ret;
+       bool ret;
 
        rcu_read_lock_sched();
 
@@ -238,7 +238,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 {
        unsigned long __percpu *percpu_count;
-       int ret = false;
+       bool ret = false;
 
        rcu_read_lock_sched();
 
index 5b2e6159b744a35a845a82e9213295be754fbabd..93664f022ecf15fcbe7640d37989d520d3ae6d9a 100644 (file)
@@ -4,15 +4,15 @@
 #include <linux/atomic.h>
 #include <linux/rwsem.h>
 #include <linux/percpu.h>
-#include <linux/wait.h>
+#include <linux/rcuwait.h>
 #include <linux/rcu_sync.h>
 #include <linux/lockdep.h>
 
 struct percpu_rw_semaphore {
        struct rcu_sync         rss;
        unsigned int __percpu   *read_count;
-       struct rw_semaphore     rw_sem;
-       wait_queue_head_t       writer;
+       struct rw_semaphore     rw_sem; /* slowpath */
+       struct rcuwait          writer; /* blocked writer */
        int                     readers_block;
 };
 
@@ -22,7 +22,7 @@ static struct percpu_rw_semaphore name = {                            \
        .rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC),        \
        .read_count = &__percpu_rwsem_rc_##name,                        \
        .rw_sem = __RWSEM_INITIALIZER(name.rw_sem),                     \
-       .writer = __WAIT_QUEUE_HEAD_INITIALIZER(name.writer),           \
+       .writer = __RCUWAIT_INITIALIZER(name.writer),                   \
 }
 
 extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
index 78ed8105e64d1ae0341104c8d129cfeba9402800..000fdb211c7d7e2c8fc7351a99dbf6b58fbcb135 100644 (file)
@@ -482,6 +482,7 @@ struct perf_addr_filter {
  * @list:      list of filters for this event
  * @lock:      spinlock that serializes accesses to the @list and event's
  *             (and its children's) filter generations.
+ * @nr_file_filters:   number of file-based filters
  *
  * A child event will use parent's @list (and therefore @lock), so they are
  * bundled together; see perf_event_addr_filters().
@@ -489,6 +490,7 @@ struct perf_addr_filter {
 struct perf_addr_filters_head {
        struct list_head        list;
        raw_spinlock_t          lock;
+       unsigned int            nr_file_filters;
 };
 
 /**
@@ -785,9 +787,9 @@ struct perf_cpu_context {
        ktime_t                         hrtimer_interval;
        unsigned int                    hrtimer_active;
 
-       struct pmu                      *unique_pmu;
 #ifdef CONFIG_CGROUP_PERF
        struct perf_cgroup              *cgrp;
+       struct list_head                cgrp_cpuctx_entry;
 #endif
 
        struct list_head                sched_cb_entry;
index f7d95f644eed9b7a15581a490a19a18ea68b050e..7fc1105605bfd2334dab7fd6e96f25a2c33ed566 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/timer.h>
 #include <linux/workqueue.h>
 #include <linux/mod_devicetable.h>
-#include <linux/phy_led_triggers.h>
 
 #include <linux/atomic.h>
 
index a2daea0a37d2ae14ed4c9d965bff2400e5528fd2..b37b05bfd1a6dd8af03b6295febf4e14915e6941 100644 (file)
@@ -18,11 +18,11 @@ struct phy_device;
 #ifdef CONFIG_LED_TRIGGER_PHY
 
 #include <linux/leds.h>
+#include <linux/phy.h>
 
 #define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE      10
-#define PHY_MII_BUS_ID_SIZE    (20 - 3)
 
-#define PHY_LINK_LED_TRIGGER_NAME_SIZE (PHY_MII_BUS_ID_SIZE + \
+#define PHY_LINK_LED_TRIGGER_NAME_SIZE (MII_BUS_ID_SIZE + \
                                       FIELD_SIZEOF(struct mdio_device, addr)+\
                                       PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE)
 
diff --git a/include/linux/platform_data/intel-spi.h b/include/linux/platform_data/intel-spi.h
new file mode 100644 (file)
index 0000000..942b0c3
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Intel PCH/PCU SPI flash driver.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef INTEL_SPI_PDATA_H
+#define INTEL_SPI_PDATA_H
+
+enum intel_spi_type {
+       INTEL_SPI_BYT = 1,
+       INTEL_SPI_LPT,
+       INTEL_SPI_BXT,
+};
+
+/**
+ * struct intel_spi_boardinfo - Board specific data for Intel SPI driver
+ * @type: Type which this controller is compatible with
+ * @writeable: The chip is writeable
+ */
+struct intel_spi_boardinfo {
+       enum intel_spi_type type;
+       bool writeable;
+};
+
+#endif /* INTEL_SPI_PDATA_H */
index 9bb63ac13f04574d5cf84a87067f4917dc79a5f8..171a271c2cbd5e1d5d806b0275116b59ee6710bf 100644 (file)
@@ -5,25 +5,14 @@ struct spi_device;
 
 /**
  * struct ep93xx_spi_info - EP93xx specific SPI descriptor
- * @num_chipselect: number of chip selects on this board, must be
- *                  at least one
+ * @chipselect: array of gpio numbers to use as chip selects
+ * @num_chipselect: ARRAY_SIZE(chipselect)
  * @use_dma: use DMA for the transfers
  */
 struct ep93xx_spi_info {
+       int     *chipselect;
        int     num_chipselect;
        bool    use_dma;
 };
 
-/**
- * struct ep93xx_spi_chip_ops - operation callbacks for SPI slave device
- * @setup: setup the chip select mechanism
- * @cleanup: cleanup the chip select mechanism
- * @cs_control: control the device chip select
- */
-struct ep93xx_spi_chip_ops {
-       int     (*setup)(struct spi_device *spi);
-       void    (*cleanup)(struct spi_device *spi);
-       void    (*cs_control)(struct spi_device *spi, int value);
-};
-
 #endif /* __ASM_MACH_EP93XX_SPI_H */
index 81ece61075dff0e7a04e56f764bdae8a142f08e5..5339ed5bd6f9d04af80cd7da068a776517d9c4ff 100644 (file)
@@ -182,6 +182,9 @@ static inline int pm_genpd_remove(struct generic_pm_domain *genpd)
 {
        return -ENOTSUPP;
 }
+
+#define simple_qos_governor            (*(struct dev_power_governor *)(NULL))
+#define pm_domain_always_on_gov                (*(struct dev_power_governor *)(NULL))
 #endif
 
 static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
index 0f65d36c2a75153c4f0c1b47f2ff351ca5365239..3e2547d6e207c3c5655e6da3b78cd61cb13b7bd5 100644 (file)
@@ -6,7 +6,6 @@
  */
 #include <linux/plist.h>
 #include <linux/notifier.h>
-#include <linux/miscdevice.h>
 #include <linux/device.h>
 #include <linux/workqueue.h>
 
@@ -147,8 +146,6 @@ int dev_pm_qos_add_notifier(struct device *dev,
                            struct notifier_block *notifier);
 int dev_pm_qos_remove_notifier(struct device *dev,
                               struct notifier_block *notifier);
-int dev_pm_qos_add_global_notifier(struct notifier_block *notifier);
-int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
 void dev_pm_qos_constraints_init(struct device *dev);
 void dev_pm_qos_constraints_destroy(struct device *dev);
 int dev_pm_qos_add_ancestor_request(struct device *dev,
@@ -200,12 +197,6 @@ static inline int dev_pm_qos_add_notifier(struct device *dev,
 static inline int dev_pm_qos_remove_notifier(struct device *dev,
                                             struct notifier_block *notifier)
                        { return 0; }
-static inline int dev_pm_qos_add_global_notifier(
-                                       struct notifier_block *notifier)
-                       { return 0; }
-static inline int dev_pm_qos_remove_global_notifier(
-                                       struct notifier_block *notifier)
-                       { return 0; }
 static inline void dev_pm_qos_constraints_init(struct device *dev)
 {
        dev->power.power_state = PMSG_ON;
index 51334edec506815a70868dd451d5c72944ed0a59..a39540326417960b586cdba4985cf09c131fe086 100644 (file)
@@ -80,6 +80,7 @@
 /********** kernel/mutexes **********/
 #define MUTEX_DEBUG_INIT       0x11
 #define MUTEX_DEBUG_FREE       0x22
+#define MUTEX_POISON_WW_CTX    ((void *) 0x500 + POISON_POINTER_DELTA)
 
 /********** lib/flex_array.c **********/
 #define FLEX_ARRAY_FREE        0x6c    /* for use-after-free poisoning */
index 62d44c176071833ab644d46a95ec910fb1ee57e7..64aa189efe21d6792b07f2b9005fcf5345b2e98c 100644 (file)
@@ -8,19 +8,9 @@
 #include <linux/alarmtimer.h>
 
 
-static inline unsigned long long cputime_to_expires(cputime_t expires)
-{
-       return (__force unsigned long long)expires;
-}
-
-static inline cputime_t expires_to_cputime(unsigned long long expires)
-{
-       return (__force cputime_t)expires;
-}
-
 struct cpu_timer_list {
        struct list_head entry;
-       unsigned long long expires, incr;
+       u64 expires, incr;
        struct task_struct *task;
        int firing;
 };
@@ -129,7 +119,7 @@ void run_posix_cpu_timers(struct task_struct *task);
 void posix_cpu_timers_exit(struct task_struct *task);
 void posix_cpu_timers_exit_group(struct task_struct *task);
 void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
-                          cputime_t *newval, cputime_t *oldval);
+                          u64 *newval, u64 *oldval);
 
 long clock_nanosleep_restart(struct restart_block *restart_block);
 
index bed9557b69e74150d1110514a45f50fea8a066d3..b312bcef53da9da35df60dd4ddb321556db3a518 100644 (file)
@@ -4,8 +4,16 @@
 enum bq27xxx_chip {
        BQ27000 = 1, /* bq27000, bq27200 */
        BQ27010, /* bq27010, bq27210 */
-       BQ27500, /* bq27500 */
-       BQ27510, /* bq27510, bq27520 */
+       BQ2750X, /* bq27500 deprecated alias */
+       BQ2751X, /* bq27510, bq27520 deprecated alias */
+       BQ27500, /* bq27500/1 */
+       BQ27510G1, /* bq27510G1 */
+       BQ27510G2, /* bq27510G2 */
+       BQ27510G3, /* bq27510G3 */
+       BQ27520G1, /* bq27520G1 */
+       BQ27520G2, /* bq27520G2 */
+       BQ27520G3, /* bq27520G3 */
+       BQ27520G4, /* bq27520G4 */
        BQ27530, /* bq27530, bq27531 */
        BQ27541, /* bq27541, bq27542, bq27546, bq27742 */
        BQ27545, /* bq27545 */
index 2d6f0c39ed6833b96f8aad03d9ed8b2e4d445add..a0522328d7aa5c2e3f94a77dc266cd20301046a7 100644 (file)
@@ -90,9 +90,9 @@
 #define SSSR_RFL_MASK  (0xf << 12)     /* Receive FIFO Level mask */
 
 #define SSCR1_TFT      (0x000003c0)    /* Transmit FIFO Threshold (mask) */
-#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */
+#define SSCR1_TxTresh(x) (((x) - 1) << 6)      /* level [1..16] */
 #define SSCR1_RFT      (0x00003c00)    /* Receive FIFO Threshold (mask) */
-#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */
+#define SSCR1_RxTresh(x) (((x) - 1) << 10)     /* level [1..16] */
 
 #define RX_THRESH_CE4100_DFLT  2
 #define TX_THRESH_CE4100_DFLT  2
 #define CE4100_SSCR1_RxTresh(x) (((x) - 1) << 10)      /* level [1..4] */
 
 /* QUARK_X1000 SSCR0 bit definition */
-#define QUARK_X1000_SSCR0_DSS  (0x1F)          /* Data Size Select (mask) */
-#define QUARK_X1000_SSCR0_DataSize(x)  ((x) - 1)       /* Data Size Select [4..32] */
-#define QUARK_X1000_SSCR0_FRF  (0x3 << 5)      /* FRame Format (mask) */
+#define QUARK_X1000_SSCR0_DSS          (0x1F << 0)     /* Data Size Select (mask) */
+#define QUARK_X1000_SSCR0_DataSize(x)  ((x) - 1)       /* Data Size Select [4..32] */
+#define QUARK_X1000_SSCR0_FRF          (0x3 << 5)      /* FRame Format (mask) */
 #define QUARK_X1000_SSCR0_Motorola     (0x0 << 5)      /* Motorola's Serial Peripheral Interface (SPI) */
 
 #define RX_THRESH_QUARK_X1000_DFLT     1
 #define QUARK_X1000_SSCR1_TxTresh(x) (((x) - 1) << 6)  /* level [1..32] */
 #define QUARK_X1000_SSCR1_RFT  (0x1F << 11)    /* Receive FIFO Threshold (mask) */
 #define QUARK_X1000_SSCR1_RxTresh(x) (((x) - 1) << 11) /* level [1..32] */
-#define QUARK_X1000_SSCR1_STRF       (1 << 17)         /* Select FIFO or EFWR */
-#define QUARK_X1000_SSCR1_EFWR (1 << 16)               /* Enable FIFO Write/Read */
+#define QUARK_X1000_SSCR1_STRF (1 << 17)       /* Select FIFO or EFWR */
+#define QUARK_X1000_SSCR1_EFWR (1 << 16)       /* Enable FIFO Write/Read */
 
 /* extra bits in PXA255, PXA26x and PXA27x SSP ports */
 #define SSCR0_TISSP            (1 << 4)        /* TI Sync Serial Protocol */
index 01f71e1d2e941e359fc5fdd07f0645813ef8f845..6ade6a52d9d42b731fef1b68c9f44dd91928f26c 100644 (file)
@@ -1161,5 +1161,17 @@ do { \
                ftrace_dump(oops_dump_mode); \
 } while (0)
 
+/*
+ * Place this after a lock-acquisition primitive to guarantee that
+ * an UNLOCK+LOCK pair acts as a full barrier.  This guarantee applies
+ * if the UNLOCK and LOCK are executed by the same CPU or if the
+ * UNLOCK and LOCK operate on the same lock variable.
+ */
+#ifdef CONFIG_PPC
+#define smp_mb__after_unlock_lock()    smp_mb()  /* Full ordering for lock. */
+#else /* #ifdef CONFIG_PPC */
+#define smp_mb__after_unlock_lock()    do { } while (0)
+#endif /* #else #ifdef CONFIG_PPC */
+
 
 #endif /* __LINUX_RCUPDATE_H */
index ac81e4063b407a6525931ff78cdf51544547646e..4f9b2fa2173d692aac0ec3b4011456a3823105d4 100644 (file)
 
 #include <linux/cache.h>
 
+struct rcu_dynticks;
+static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
+{
+       return 0;
+}
+
 static inline unsigned long get_state_synchronize_rcu(void)
 {
        return 0;
diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h
new file mode 100644 (file)
index 0000000..a4ede51
--- /dev/null
@@ -0,0 +1,63 @@
+#ifndef _LINUX_RCUWAIT_H_
+#define _LINUX_RCUWAIT_H_
+
+#include <linux/rcupdate.h>
+
+/*
+ * rcuwait provides a way of blocking and waking up a single
+ * task in an rcu-safe manner; where it is forbidden to use
+ * after exit_notify(). task_struct is not properly rcu protected,
+ * unless dealing with rcu-aware lists, ie: find_task_by_*().
+ *
+ * Alternatively we have task_rcu_dereference(), but the return
+ * semantics have different implications which would break the
+ * wakeup side. The only time @task is non-nil is when a user is
+ * blocked (or checking if it needs to) on a condition, and reset
+ * as soon as we know that the condition has succeeded and are
+ * awoken.
+ */
+struct rcuwait {
+       struct task_struct *task;
+};
+
+#define __RCUWAIT_INITIALIZER(name)            \
+       { .task = NULL, }
+
+static inline void rcuwait_init(struct rcuwait *w)
+{
+       w->task = NULL;
+}
+
+extern void rcuwait_wake_up(struct rcuwait *w);
+
+/*
+ * The caller is responsible for locking around rcuwait_wait_event(),
+ * such that writes to @task are properly serialized.
+ */
+#define rcuwait_wait_event(w, condition)                               \
+({                                                                     \
+       /*                                                              \
+        * Complain if we are called after do_exit()/exit_notify(),     \
+        * as we cannot rely on the rcu critical region for the         \
+        * wakeup side.                                                 \
+        */                                                             \
+       WARN_ON(current->exit_state);                                   \
+                                                                       \
+       rcu_assign_pointer((w)->task, current);                         \
+       for (;;) {                                                      \
+               /*                                                      \
+                * Implicit barrier (A) pairs with (B) in               \
+                * rcuwait_wake_up().                                   \
+                */                                                     \
+               set_current_state(TASK_UNINTERRUPTIBLE);                \
+               if (condition)                                          \
+                       break;                                          \
+                                                                       \
+               schedule();                                             \
+       }                                                               \
+                                                                       \
+       WRITE_ONCE((w)->task, NULL);                                    \
+       __set_current_state(TASK_RUNNING);                              \
+})
+
+#endif /* _LINUX_RCUWAIT_H_ */
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
new file mode 100644 (file)
index 0000000..600aadf
--- /dev/null
@@ -0,0 +1,294 @@
+#ifndef _LINUX_REFCOUNT_H
+#define _LINUX_REFCOUNT_H
+
+/*
+ * Variant of atomic_t specialized for reference counts.
+ *
+ * The interface matches the atomic_t interface (to aid in porting) but only
+ * provides the few functions one should use for reference counting.
+ *
+ * It differs in that the counter saturates at UINT_MAX and will not move once
+ * there. This avoids wrapping the counter and causing 'spurious'
+ * use-after-free issues.
+ *
+ * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
+ * and provide only what is strictly required for refcounts.
+ *
+ * The increments are fully relaxed; these will not provide ordering. The
+ * rationale is that whatever is used to obtain the object we're increasing the
+ * reference count on will provide the ordering. For locked data structures,
+ * its the lock acquire, for RCU/lockless data structures its the dependent
+ * load.
+ *
+ * Do note that inc_not_zero() provides a control dependency which will order
+ * future stores against the inc, this ensures we'll never modify the object
+ * if we did not in fact acquire a reference.
+ *
+ * The decrements will provide release order, such that all the prior loads and
+ * stores will be issued before, it also provides a control dependency, which
+ * will order us against the subsequent free().
+ *
+ * The control dependency is against the load of the cmpxchg (ll/sc) that
+ * succeeded. This means the stores aren't fully ordered, but this is fine
+ * because the 1->0 transition indicates no concurrency.
+ *
+ * Note that the allocator is responsible for ordering things between free()
+ * and alloc().
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+
+#ifdef CONFIG_DEBUG_REFCOUNT
+#define REFCOUNT_WARN(cond, str) WARN_ON(cond)
+#define __refcount_check       __must_check
+#else
+#define REFCOUNT_WARN(cond, str) (void)(cond)
+#define __refcount_check
+#endif
+
+typedef struct refcount_struct {
+       atomic_t refs;
+} refcount_t;
+
+#define REFCOUNT_INIT(n)       { .refs = ATOMIC_INIT(n), }
+
+static inline void refcount_set(refcount_t *r, unsigned int n)
+{
+       atomic_set(&r->refs, n);
+}
+
+static inline unsigned int refcount_read(const refcount_t *r)
+{
+       return atomic_read(&r->refs);
+}
+
+static inline __refcount_check
+bool refcount_add_not_zero(unsigned int i, refcount_t *r)
+{
+       unsigned int old, new, val = atomic_read(&r->refs);
+
+       for (;;) {
+               if (!val)
+                       return false;
+
+               if (unlikely(val == UINT_MAX))
+                       return true;
+
+               new = val + i;
+               if (new < val)
+                       new = UINT_MAX;
+               old = atomic_cmpxchg_relaxed(&r->refs, val, new);
+               if (old == val)
+                       break;
+
+               val = old;
+       }
+
+       REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+
+       return true;
+}
+
+static inline void refcount_add(unsigned int i, refcount_t *r)
+{
+       REFCOUNT_WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
+}
+
+/*
+ * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_inc_not_zero(refcount_t *r)
+{
+       unsigned int old, new, val = atomic_read(&r->refs);
+
+       for (;;) {
+               new = val + 1;
+
+               if (!val)
+                       return false;
+
+               if (unlikely(!new))
+                       return true;
+
+               old = atomic_cmpxchg_relaxed(&r->refs, val, new);
+               if (old == val)
+                       break;
+
+               val = old;
+       }
+
+       REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+
+       return true;
+}
+
+/*
+ * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller already has a
+ * reference on the object, will WARN when this is not so.
+ */
+static inline void refcount_inc(refcount_t *r)
+{
+       REFCOUNT_WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
+}
+
+/*
+ * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
+ * decrement when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_sub_and_test(unsigned int i, refcount_t *r)
+{
+       unsigned int old, new, val = atomic_read(&r->refs);
+
+       for (;;) {
+               if (unlikely(val == UINT_MAX))
+                       return false;
+
+               new = val - i;
+               if (new > val) {
+                       REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+                       return false;
+               }
+
+               old = atomic_cmpxchg_release(&r->refs, val, new);
+               if (old == val)
+                       break;
+
+               val = old;
+       }
+
+       return !new;
+}
+
+static inline __refcount_check
+bool refcount_dec_and_test(refcount_t *r)
+{
+       return refcount_sub_and_test(1, r);
+}
+
+/*
+ * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
+ * when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before.
+ */
+static inline
+void refcount_dec(refcount_t *r)
+{
+       REFCOUNT_WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
+}
+
+/*
+ * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
+ * success thereof.
+ *
+ * Like all decrement operations, it provides release memory order and provides
+ * a control dependency.
+ *
+ * It can be used like a try-delete operator; this explicit case is provided
+ * and not cmpxchg in generic, because that would allow implementing unsafe
+ * operations.
+ */
+static inline __refcount_check
+bool refcount_dec_if_one(refcount_t *r)
+{
+       return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
+}
+
+/*
+ * No atomic_t counterpart, it decrements unless the value is 1, in which case
+ * it will return false.
+ *
+ * Was often done like: atomic_add_unless(&var, -1, 1)
+ */
+static inline __refcount_check
+bool refcount_dec_not_one(refcount_t *r)
+{
+       unsigned int old, new, val = atomic_read(&r->refs);
+
+       for (;;) {
+               if (unlikely(val == UINT_MAX))
+                       return true;
+
+               if (val == 1)
+                       return false;
+
+               new = val - 1;
+               if (new > val) {
+                       REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+                       return true;
+               }
+
+               old = atomic_cmpxchg_release(&r->refs, val, new);
+               if (old == val)
+                       break;
+
+               val = old;
+       }
+
+       return true;
+}
+
+/*
+ * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
+ * to decrement when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
+{
+       if (refcount_dec_not_one(r))
+               return false;
+
+       mutex_lock(lock);
+       if (!refcount_dec_and_test(r)) {
+               mutex_unlock(lock);
+               return false;
+       }
+
+       return true;
+}
+
+/*
+ * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
+ * decrement when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
+{
+       if (refcount_dec_not_one(r))
+               return false;
+
+       spin_lock(lock);
+       if (!refcount_dec_and_test(r)) {
+               spin_unlock(lock);
+               return false;
+       }
+
+       return true;
+}
+
+#endif /* _LINUX_REFCOUNT_H */
index f6673132431d09c3caa0c1394286fb310c93f9c1..e88649225a6073fea86bc77d99bdf3109fe7838d 100644 (file)
@@ -40,12 +40,13 @@ enum regcache_type {
 };
 
 /**
- * Default value for a register.  We use an array of structs rather
- * than a simple array as many modern devices have very sparse
- * register maps.
+ * struct reg_default - Default value for a register.
  *
  * @reg: Register address.
  * @def: Register default value.
+ *
+ * We use an array of structs rather than a simple array as many modern devices
+ * have very sparse register maps.
  */
 struct reg_default {
        unsigned int reg;
@@ -53,12 +54,14 @@ struct reg_default {
 };
 
 /**
- * Register/value pairs for sequences of writes with an optional delay in
- * microseconds to be applied after each write.
+ * struct reg_sequence - An individual write from a sequence of writes.
  *
  * @reg: Register address.
  * @def: Register value.
  * @delay_us: Delay to be applied after the register write in microseconds
+ *
+ * Register/value pairs for sequences of writes with an optional delay in
+ * microseconds to be applied after each write.
  */
 struct reg_sequence {
        unsigned int reg;
@@ -98,6 +101,7 @@ struct reg_sequence {
 
 /**
  * regmap_read_poll_timeout - Poll until a condition is met or a timeout occurs
+ *
  * @map: Regmap to read from
  * @addr: Address to poll
  * @val: Unsigned integer variable to read the value into
@@ -146,8 +150,8 @@ enum regmap_endian {
 };
 
 /**
- * A register range, used for access related checks
- * (readable/writeable/volatile/precious checks)
+ * struct regmap_range - A register range, used for access related checks
+ *                       (readable/writeable/volatile/precious checks)
  *
  * @range_min: address of first register
  * @range_max: address of last register
@@ -159,16 +163,18 @@ struct regmap_range {
 
 #define regmap_reg_range(low, high) { .range_min = low, .range_max = high, }
 
-/*
- * A table of ranges including some yes ranges and some no ranges.
- * If a register belongs to a no_range, the corresponding check function
- * will return false. If a register belongs to a yes range, the corresponding
- * check function will return true. "no_ranges" are searched first.
+/**
+ * struct regmap_access_table - A table of register ranges for access checks
  *
  * @yes_ranges : pointer to an array of regmap ranges used as "yes ranges"
  * @n_yes_ranges: size of the above array
  * @no_ranges: pointer to an array of regmap ranges used as "no ranges"
  * @n_no_ranges: size of the above array
+ *
+ * A table of ranges including some yes ranges and some no ranges.
+ * If a register belongs to a no_range, the corresponding check function
+ * will return false. If a register belongs to a yes range, the corresponding
+ * check function will return true. "no_ranges" are searched first.
  */
 struct regmap_access_table {
        const struct regmap_range *yes_ranges;
@@ -181,7 +187,7 @@ typedef void (*regmap_lock)(void *);
 typedef void (*regmap_unlock)(void *);
 
 /**
- * Configuration for the register map of a device.
+ * struct regmap_config - Configuration for the register map of a device.
  *
  * @name: Optional name of the regmap. Useful when a device has multiple
  *        register regions.
@@ -314,22 +320,24 @@ struct regmap_config {
 };
 
 /**
- * Configuration for indirectly accessed or paged registers.
- * Registers, mapped to this virtual range, are accessed in two steps:
- *     1. page selector register update;
- *     2. access through data window registers.
+ * struct regmap_range_cfg - Configuration for indirectly accessed or paged
+ *                           registers.
  *
  * @name: Descriptive name for diagnostics
  *
  * @range_min: Address of the lowest register address in virtual range.
  * @range_max: Address of the highest register in virtual range.
  *
- * @page_sel_reg: Register with selector field.
- * @page_sel_mask: Bit shift for selector value.
- * @page_sel_shift: Bit mask for selector value.
+ * @selector_reg: Register with selector field.
+ * @selector_mask: Bit shift for selector value.
+ * @selector_shift: Bit mask for selector value.
  *
  * @window_start: Address of first (lowest) register in data window.
  * @window_len: Number of registers in data window.
+ *
+ * Registers, mapped to this virtual range, are accessed in two steps:
+ *     1. page selector register update;
+ *     2. access through data window registers.
  */
 struct regmap_range_cfg {
        const char *name;
@@ -372,7 +380,8 @@ typedef struct regmap_async *(*regmap_hw_async_alloc)(void);
 typedef void (*regmap_hw_free_context)(void *context);
 
 /**
- * Description of a hardware bus for the register map infrastructure.
+ * struct regmap_bus - Description of a hardware bus for the register map
+ *                     infrastructure.
  *
  * @fast_io: Register IO is fast. Use a spinlock instead of a mutex
  *          to perform locking. This field is ignored if custom lock/unlock
@@ -385,6 +394,10 @@ typedef void (*regmap_hw_free_context)(void *context);
  *               must serialise with respect to non-async I/O.
  * @reg_write: Write a single register value to the given register address. This
  *             write operation has to complete when returning from the function.
+ * @reg_update_bits: Update bits operation to be used against volatile
+ *                   registers, intended for devices supporting some mechanism
+ *                   for setting clearing bits without having to
+ *                   read/modify/write.
  * @read: Read operation.  Data is returned in the buffer used to transmit
  *         data.
  * @reg_read: Read a single register value from a given register address.
@@ -514,7 +527,7 @@ struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97,
 #endif
 
 /**
- * regmap_init(): Initialise register map
+ * regmap_init() - Initialise register map
  *
  * @dev: Device that will be interacted with
  * @bus: Bus-specific callbacks to use with device
@@ -532,7 +545,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
                      const struct regmap_config *config);
 
 /**
- * regmap_init_i2c(): Initialise register map
+ * regmap_init_i2c() - Initialise register map
  *
  * @i2c: Device that will be interacted with
  * @config: Configuration for register map
@@ -545,9 +558,9 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
                                i2c, config)
 
 /**
- * regmap_init_spi(): Initialise register map
+ * regmap_init_spi() - Initialise register map
  *
- * @spi: Device that will be interacted with
+ * @dev: Device that will be interacted with
  * @config: Configuration for register map
  *
  * The return value will be an ERR_PTR() on error or a valid pointer to
@@ -558,8 +571,9 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
                                dev, config)
 
 /**
- * regmap_init_spmi_base(): Create regmap for the Base register space
- * @sdev:      SPMI device that will be interacted with
+ * regmap_init_spmi_base() - Create regmap for the Base register space
+ *
+ * @dev:       SPMI device that will be interacted with
  * @config:    Configuration for register map
  *
  * The return value will be an ERR_PTR() on error or a valid pointer to
@@ -570,8 +584,9 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
                                dev, config)
 
 /**
- * regmap_init_spmi_ext(): Create regmap for Ext register space
- * @sdev:      Device that will be interacted with
+ * regmap_init_spmi_ext() - Create regmap for Ext register space
+ *
+ * @dev:       Device that will be interacted with
  * @config:    Configuration for register map
  *
  * The return value will be an ERR_PTR() on error or a valid pointer to
@@ -582,7 +597,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
                                dev, config)
 
 /**
- * regmap_init_mmio_clk(): Initialise register map with register clock
+ * regmap_init_mmio_clk() - Initialise register map with register clock
  *
  * @dev: Device that will be interacted with
  * @clk_id: register clock consumer ID
@@ -597,7 +612,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
                                dev, clk_id, regs, config)
 
 /**
- * regmap_init_mmio(): Initialise register map
+ * regmap_init_mmio() - Initialise register map
  *
  * @dev: Device that will be interacted with
  * @regs: Pointer to memory-mapped IO region
@@ -610,7 +625,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
        regmap_init_mmio_clk(dev, NULL, regs, config)
 
 /**
- * regmap_init_ac97(): Initialise AC'97 register map
+ * regmap_init_ac97() - Initialise AC'97 register map
  *
  * @ac97: Device that will be interacted with
  * @config: Configuration for register map
@@ -624,7 +639,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
 bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
 
 /**
- * devm_regmap_init(): Initialise managed register map
+ * devm_regmap_init() - Initialise managed register map
  *
  * @dev: Device that will be interacted with
  * @bus: Bus-specific callbacks to use with device
@@ -641,7 +656,7 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
                                dev, bus, bus_context, config)
 
 /**
- * devm_regmap_init_i2c(): Initialise managed register map
+ * devm_regmap_init_i2c() - Initialise managed register map
  *
  * @i2c: Device that will be interacted with
  * @config: Configuration for register map
@@ -655,9 +670,9 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
                                i2c, config)
 
 /**
- * devm_regmap_init_spi(): Initialise register map
+ * devm_regmap_init_spi() - Initialise register map
  *
- * @spi: Device that will be interacted with
+ * @dev: Device that will be interacted with
  * @config: Configuration for register map
  *
  * The return value will be an ERR_PTR() on error or a valid pointer
@@ -669,8 +684,9 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
                                dev, config)
 
 /**
- * devm_regmap_init_spmi_base(): Create managed regmap for Base register space
- * @sdev:      SPMI device that will be interacted with
+ * devm_regmap_init_spmi_base() - Create managed regmap for Base register space
+ *
+ * @dev:       SPMI device that will be interacted with
  * @config:    Configuration for register map
  *
  * The return value will be an ERR_PTR() on error or a valid pointer
@@ -682,8 +698,9 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
                                dev, config)
 
 /**
- * devm_regmap_init_spmi_ext(): Create managed regmap for Ext register space
- * @sdev:      SPMI device that will be interacted with
+ * devm_regmap_init_spmi_ext() - Create managed regmap for Ext register space
+ *
+ * @dev:       SPMI device that will be interacted with
  * @config:    Configuration for register map
  *
  * The return value will be an ERR_PTR() on error or a valid pointer
@@ -695,7 +712,7 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
                                dev, config)
 
 /**
- * devm_regmap_init_mmio_clk(): Initialise managed register map with clock
+ * devm_regmap_init_mmio_clk() - Initialise managed register map with clock
  *
  * @dev: Device that will be interacted with
  * @clk_id: register clock consumer ID
@@ -711,7 +728,7 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
                                dev, clk_id, regs, config)
 
 /**
- * devm_regmap_init_mmio(): Initialise managed register map
+ * devm_regmap_init_mmio() - Initialise managed register map
  *
  * @dev: Device that will be interacted with
  * @regs: Pointer to memory-mapped IO region
@@ -725,7 +742,7 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
        devm_regmap_init_mmio_clk(dev, NULL, regs, config)
 
 /**
- * devm_regmap_init_ac97(): Initialise AC'97 register map
+ * devm_regmap_init_ac97() - Initialise AC'97 register map
  *
  * @ac97: Device that will be interacted with
  * @config: Configuration for register map
@@ -800,7 +817,7 @@ bool regmap_reg_in_ranges(unsigned int reg,
                          unsigned int nranges);
 
 /**
- * Description of an register field
+ * struct reg_field - Description of an register field
  *
  * @reg: Offset of the register within the regmap bank
  * @lsb: lsb of the register field.
@@ -841,7 +858,7 @@ int regmap_fields_update_bits_base(struct regmap_field *field,  unsigned int id,
                                   bool *change, bool async, bool force);
 
 /**
- * Description of an IRQ for the generic regmap irq_chip.
+ * struct regmap_irq - Description of an IRQ for the generic regmap irq_chip.
  *
  * @reg_offset: Offset of the status/mask register within the bank
  * @mask:       Mask used to flag/control the register.
@@ -861,9 +878,7 @@ struct regmap_irq {
        [_irq] = { .reg_offset = (_off), .mask = (_mask) }
 
 /**
- * Description of a generic regmap irq_chip.  This is not intended to
- * handle every possible interrupt controller, but it should handle a
- * substantial proportion of those that are found in the wild.
+ * struct regmap_irq_chip - Description of a generic regmap irq_chip.
  *
  * @name:        Descriptive name for IRQ controller.
  *
@@ -897,6 +912,10 @@ struct regmap_irq {
  *                  after handling the interrupts in regmap_irq_handler().
  * @irq_drv_data:    Driver specific IRQ data which is passed as parameter when
  *                  driver specific pre/post interrupt handler is called.
+ *
+ * This is not intended to handle every possible interrupt controller, but
+ * it should handle a substantial proportion of those that are found in the
+ * wild.
  */
 struct regmap_irq_chip {
        const char *name;
index ad3ec9ec61f7b6de743b5d6de4225defcc18be99..c8e519d0b4a35153c823c6ee172fd7ca26f43402 100644 (file)
@@ -29,7 +29,6 @@ struct sched_param {
 
 #include <asm/page.h>
 #include <asm/ptrace.h>
-#include <linux/cputime.h>
 
 #include <linux/smp.h>
 #include <linux/sem.h>
@@ -227,7 +226,7 @@ extern void proc_sched_set_task(struct task_struct *p);
 extern char ___assert_task_state[1 - 2*!!(
                sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
 
-/* Convenience macros for the sake of set_task_state */
+/* Convenience macros for the sake of set_current_state */
 #define TASK_KILLABLE          (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
 #define TASK_STOPPED           (TASK_WAKEKILL | __TASK_STOPPED)
 #define TASK_TRACED            (TASK_WAKEKILL | __TASK_TRACED)
@@ -254,17 +253,6 @@ extern char ___assert_task_state[1 - 2*!!(
 
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 
-#define __set_task_state(tsk, state_value)                     \
-       do {                                                    \
-               (tsk)->task_state_change = _THIS_IP_;           \
-               (tsk)->state = (state_value);                   \
-       } while (0)
-#define set_task_state(tsk, state_value)                       \
-       do {                                                    \
-               (tsk)->task_state_change = _THIS_IP_;           \
-               smp_store_mb((tsk)->state, (state_value));      \
-       } while (0)
-
 #define __set_current_state(state_value)                       \
        do {                                                    \
                current->task_state_change = _THIS_IP_;         \
@@ -277,20 +265,6 @@ extern char ___assert_task_state[1 - 2*!!(
        } while (0)
 
 #else
-
-/*
- * @tsk had better be current, or you get to keep the pieces.
- *
- * The only reason is that computing current can be more expensive than
- * using a pointer that's already available.
- *
- * Therefore, see set_current_state().
- */
-#define __set_task_state(tsk, state_value)             \
-       do { (tsk)->state = (state_value); } while (0)
-#define set_task_state(tsk, state_value)               \
-       smp_store_mb((tsk)->state, (state_value))
-
 /*
  * set_current_state() includes a barrier so that the write of current->state
  * is correctly serialised wrt the caller's subsequent test of whether to
@@ -461,12 +435,10 @@ extern signed long schedule_timeout_idle(signed long timeout);
 asmlinkage void schedule(void);
 extern void schedule_preempt_disabled(void);
 
+extern int __must_check io_schedule_prepare(void);
+extern void io_schedule_finish(int token);
 extern long io_schedule_timeout(long timeout);
-
-static inline void io_schedule(void)
-{
-       io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
-}
+extern void io_schedule(void);
 
 void __noreturn do_task_dead(void);
 
@@ -565,15 +537,13 @@ struct pacct_struct {
        int                     ac_flag;
        long                    ac_exitcode;
        unsigned long           ac_mem;
-       cputime_t               ac_utime, ac_stime;
+       u64                     ac_utime, ac_stime;
        unsigned long           ac_minflt, ac_majflt;
 };
 
 struct cpu_itimer {
-       cputime_t expires;
-       cputime_t incr;
-       u32 error;
-       u32 incr_error;
+       u64 expires;
+       u64 incr;
 };
 
 /**
@@ -587,8 +557,8 @@ struct cpu_itimer {
  */
 struct prev_cputime {
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-       cputime_t utime;
-       cputime_t stime;
+       u64 utime;
+       u64 stime;
        raw_spinlock_t lock;
 #endif
 };
@@ -603,8 +573,8 @@ static inline void prev_cputime_init(struct prev_cputime *prev)
 
 /**
  * struct task_cputime - collected CPU time counts
- * @utime:             time spent in user mode, in &cputime_t units
- * @stime:             time spent in kernel mode, in &cputime_t units
+ * @utime:             time spent in user mode, in nanoseconds
+ * @stime:             time spent in kernel mode, in nanoseconds
  * @sum_exec_runtime:  total time spent on the CPU, in nanoseconds
  *
  * This structure groups together three kinds of CPU time that are tracked for
@@ -612,8 +582,8 @@ static inline void prev_cputime_init(struct prev_cputime *prev)
  * these counts together and treat all three of them in parallel.
  */
 struct task_cputime {
-       cputime_t utime;
-       cputime_t stime;
+       u64 utime;
+       u64 stime;
        unsigned long long sum_exec_runtime;
 };
 
@@ -622,13 +592,6 @@ struct task_cputime {
 #define prof_exp       stime
 #define sched_exp      sum_exec_runtime
 
-#define INIT_CPUTIME   \
-       (struct task_cputime) {                                 \
-               .utime = 0,                                     \
-               .stime = 0,                                     \
-               .sum_exec_runtime = 0,                          \
-       }
-
 /*
  * This is the atomic variant of task_cputime, which can be used for
  * storing and updating task_cputime statistics without locking.
@@ -734,13 +697,14 @@ struct signal_struct {
        unsigned int            is_child_subreaper:1;
        unsigned int            has_child_subreaper:1;
 
+#ifdef CONFIG_POSIX_TIMERS
+
        /* POSIX.1b Interval Timers */
        int                     posix_timer_id;
        struct list_head        posix_timers;
 
        /* ITIMER_REAL timer for the process */
        struct hrtimer real_timer;
-       struct pid *leader_pid;
        ktime_t it_real_incr;
 
        /*
@@ -759,12 +723,16 @@ struct signal_struct {
        /* Earliest-expiration cache. */
        struct task_cputime cputime_expires;
 
+       struct list_head cpu_timers[3];
+
+#endif
+
+       struct pid *leader_pid;
+
 #ifdef CONFIG_NO_HZ_FULL
        atomic_t tick_dep_mask;
 #endif
 
-       struct list_head cpu_timers[3];
-
        struct pid *tty_old_pgrp;
 
        /* boolean value for session group leader */
@@ -782,9 +750,9 @@ struct signal_struct {
         * in __exit_signal, except for the group leader.
         */
        seqlock_t stats_lock;
-       cputime_t utime, stime, cutime, cstime;
-       cputime_t gtime;
-       cputime_t cgtime;
+       u64 utime, stime, cutime, cstime;
+       u64 gtime;
+       u64 cgtime;
        struct prev_cputime prev_cputime;
        unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
        unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
@@ -1025,8 +993,8 @@ enum cpu_idle_type {
  *
  * The DEFINE_WAKE_Q macro declares and initializes the list head.
  * wake_up_q() does NOT reinitialize the list; it's expected to be
- * called near the end of a function, where the fact that the queue is
- * not used again will be easy to see by inspection.
+ * called near the end of a function. Otherwise, the list can be
+ * re-initialized for later re-use by wake_q_init().
  *
  * Note that this can cause spurious wakeups. schedule() callers
  * must ensure the call is done inside a loop, confirming that the
@@ -1046,6 +1014,12 @@ struct wake_q_head {
 #define DEFINE_WAKE_Q(name)                            \
        struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
 
+static inline void wake_q_init(struct wake_q_head *head)
+{
+       head->first = WAKE_Q_TAIL;
+       head->lastp = &head->first;
+}
+
 extern void wake_q_add(struct wake_q_head *head,
                       struct task_struct *task);
 extern void wake_up_q(struct wake_q_head *head);
@@ -1663,11 +1637,11 @@ struct task_struct {
        int __user *set_child_tid;              /* CLONE_CHILD_SETTID */
        int __user *clear_child_tid;            /* CLONE_CHILD_CLEARTID */
 
-       cputime_t utime, stime;
+       u64 utime, stime;
 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
-       cputime_t utimescaled, stimescaled;
+       u64 utimescaled, stimescaled;
 #endif
-       cputime_t gtime;
+       u64 gtime;
        struct prev_cputime prev_cputime;
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
        seqcount_t vtime_seqcount;
@@ -1691,8 +1665,10 @@ struct task_struct {
 /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
        unsigned long min_flt, maj_flt;
 
+#ifdef CONFIG_POSIX_TIMERS
        struct task_cputime cputime_expires;
        struct list_head cpu_timers[3];
+#endif
 
 /* process credentials */
        const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
@@ -1817,7 +1793,7 @@ struct task_struct {
 #if defined(CONFIG_TASK_XACCT)
        u64 acct_rss_mem1;      /* accumulated rss usage */
        u64 acct_vm_mem1;       /* accumulated virtual memory usage */
-       cputime_t acct_timexpd; /* stime + utime since last update */
+       u64 acct_timexpd;       /* stime + utime since last update */
 #endif
 #ifdef CONFIG_CPUSETS
        nodemask_t mems_allowed;        /* Protected by alloc_lock */
@@ -2262,17 +2238,17 @@ struct task_struct *try_get_task_struct(struct task_struct **ptask);
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 extern void task_cputime(struct task_struct *t,
-                        cputime_t *utime, cputime_t *stime);
-extern cputime_t task_gtime(struct task_struct *t);
+                        u64 *utime, u64 *stime);
+extern u64 task_gtime(struct task_struct *t);
 #else
 static inline void task_cputime(struct task_struct *t,
-                               cputime_t *utime, cputime_t *stime)
+                               u64 *utime, u64 *stime)
 {
        *utime = t->utime;
        *stime = t->stime;
 }
 
-static inline cputime_t task_gtime(struct task_struct *t)
+static inline u64 task_gtime(struct task_struct *t)
 {
        return t->gtime;
 }
@@ -2280,23 +2256,23 @@ static inline cputime_t task_gtime(struct task_struct *t)
 
 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
 static inline void task_cputime_scaled(struct task_struct *t,
-                                      cputime_t *utimescaled,
-                                      cputime_t *stimescaled)
+                                      u64 *utimescaled,
+                                      u64 *stimescaled)
 {
        *utimescaled = t->utimescaled;
        *stimescaled = t->stimescaled;
 }
 #else
 static inline void task_cputime_scaled(struct task_struct *t,
-                                      cputime_t *utimescaled,
-                                      cputime_t *stimescaled)
+                                      u64 *utimescaled,
+                                      u64 *stimescaled)
 {
        task_cputime(t, utimescaled, stimescaled);
 }
 #endif
 
-extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
-extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
+extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
+extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
 
 /*
  * Per process flags
@@ -2515,10 +2491,18 @@ extern u64 sched_clock_cpu(int cpu);
 extern void sched_clock_init(void);
 
 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+static inline void sched_clock_init_late(void)
+{
+}
+
 static inline void sched_clock_tick(void)
 {
 }
 
+static inline void clear_sched_clock_stable(void)
+{
+}
+
 static inline void sched_clock_idle_sleep_event(void)
 {
 }
@@ -2537,6 +2521,7 @@ static inline u64 local_clock(void)
        return sched_clock();
 }
 #else
+extern void sched_clock_init_late(void);
 /*
  * Architectures can set this to 1 if they have specified
  * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
@@ -2544,7 +2529,6 @@ static inline u64 local_clock(void)
  * is reliable after all:
  */
 extern int sched_clock_stable(void);
-extern void set_sched_clock_stable(void);
 extern void clear_sched_clock_stable(void);
 
 extern void sched_clock_tick(void);
index 441145351301bc8b57454cc292cc810caf7b6413..49308e142aaeb1cd118336e13d6ebdc30fed75f2 100644 (file)
@@ -59,6 +59,7 @@ extern unsigned int sysctl_sched_cfs_bandwidth_slice;
 extern unsigned int sysctl_sched_autogroup_enabled;
 #endif
 
+extern int sysctl_sched_rr_timeslice;
 extern int sched_rr_timeslice;
 
 extern int sched_rr_handler(struct ctl_table *table, int write,
index 47dd0cebd2045feedb7461ab149df8bfa043d1bd..59248dcc6ef3493388eb8c78ba241eea7bcb6a4d 100644 (file)
@@ -180,8 +180,6 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define raw_spin_lock_nested(lock, subclass) \
        _raw_spin_lock_nested(lock, subclass)
-# define raw_spin_lock_bh_nested(lock, subclass) \
-       _raw_spin_lock_bh_nested(lock, subclass)
 
 # define raw_spin_lock_nest_lock(lock, nest_lock)                      \
         do {                                                           \
@@ -197,7 +195,6 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 # define raw_spin_lock_nested(lock, subclass)          \
        _raw_spin_lock(((void)(subclass), (lock)))
 # define raw_spin_lock_nest_lock(lock, nest_lock)      _raw_spin_lock(lock)
-# define raw_spin_lock_bh_nested(lock, subclass)       _raw_spin_lock_bh(lock)
 #endif
 
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
@@ -317,11 +314,6 @@ do {                                                               \
        raw_spin_lock_nested(spinlock_check(lock), subclass);   \
 } while (0)
 
-#define spin_lock_bh_nested(lock, subclass)                    \
-do {                                                           \
-       raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
-} while (0)
-
 #define spin_lock_nest_lock(lock, nest_lock)                           \
 do {                                                                   \
        raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);       \
index 5344268e6e62fe7db66e2e305829309e968c4d2f..42dfab89e740aeb08de1896e491607789eacda4b 100644 (file)
@@ -22,8 +22,6 @@ int in_lock_functions(unsigned long addr);
 void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)           __acquires(lock);
 void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
                                                                __acquires(lock);
-void __lockfunc _raw_spin_lock_bh_nested(raw_spinlock_t *lock, int subclass)
-                                                               __acquires(lock);
 void __lockfunc
 _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
                                                                __acquires(lock);
index d3afef9d8dbe705b8270f653056e2f684441c7b2..d0d188861ad69a1b574ea6ce96fb8dfaa6a59c3d 100644 (file)
@@ -57,7 +57,6 @@
 
 #define _raw_spin_lock(lock)                   __LOCK(lock)
 #define _raw_spin_lock_nested(lock, subclass)  __LOCK(lock)
-#define _raw_spin_lock_bh_nested(lock, subclass) __LOCK(lock)
 #define _raw_read_lock(lock)                   __LOCK(lock)
 #define _raw_write_lock(lock)                  __LOCK(lock)
 #define _raw_spin_lock_bh(lock)                        __LOCK_BH(lock)
index dc8eb63c6568a8b0d2b9a2d793f80f255f3fc570..a598cf3ac70ca686ca60c77dbdfb986a554993af 100644 (file)
@@ -33,9 +33,9 @@
 #include <linux/rcupdate.h>
 #include <linux/workqueue.h>
 
-struct srcu_struct_array {
-       unsigned long c[2];
-       unsigned long seq[2];
+struct srcu_array {
+       unsigned long lock_count[2];
+       unsigned long unlock_count[2];
 };
 
 struct rcu_batch {
@@ -46,7 +46,7 @@ struct rcu_batch {
 
 struct srcu_struct {
        unsigned long completed;
-       struct srcu_struct_array __percpu *per_cpu_ref;
+       struct srcu_array __percpu *per_cpu_ref;
        spinlock_t queue_lock; /* protect ->batch_queue, ->running */
        bool running;
        /* callbacks just queued */
@@ -118,7 +118,7 @@ void process_srcu(struct work_struct *work);
  * See include/linux/percpu-defs.h for the rules on per-CPU variables.
  */
 #define __DEFINE_SRCU(name, is_static)                                 \
-       static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
+       static DEFINE_PER_CPU(struct srcu_array, name##_srcu_array);\
        is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
 #define DEFINE_SRCU(name)              __DEFINE_SRCU(name, /* not static */)
 #define DEFINE_STATIC_SRCU(name)       __DEFINE_SRCU(name, static)
index 62a60eeacb0aeaf9907a5247d42706851fec3ffb..8a511c0985aafe0a18722c9dd701cf8326dcad59 100644 (file)
@@ -198,7 +198,7 @@ static inline struct cache_head  *cache_get(struct cache_head *h)
 
 static inline void cache_put(struct cache_head *h, struct cache_detail *cd)
 {
-       if (atomic_read(&h->ref.refcount) <= 2 &&
+       if (kref_read(&h->ref) <= 2 &&
            h->expiry_time < cd->nextcheck)
                cd->nextcheck = h->expiry_time;
        kref_put(&h->ref, cd->cache_put);
index 85cc819676e81d9f0e85331a17f70b271cd680d1..333ad11b3dd9cf11a8ee2fc2f10bece31c543444 100644 (file)
@@ -216,5 +216,6 @@ void rpc_clnt_xprt_switch_put(struct rpc_clnt *);
 void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *);
 bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
                        const struct sockaddr *sap);
+void rpc_cleanup_clids(void);
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SUNRPC_CLNT_H */
index 0c729c3c85499a1e51f79ac7252d60d548787fb4..d9718378a8bee0b327d08c2e80a6fd3b5490b967 100644 (file)
@@ -194,8 +194,6 @@ struct platform_freeze_ops {
 };
 
 #ifdef CONFIG_SUSPEND
-extern suspend_state_t mem_sleep_default;
-
 /**
  * suspend_set_ops - set platform dependent suspend operations
  * @ops: The new suspend operations to set.
index 51d601f192d421c6e3721c6621d7c3e08dc1a54d..5a209b84fd9e48e937d6d5c44592038f3f65276d 100644 (file)
@@ -20,11 +20,6 @@ struct timer_list {
        unsigned long           data;
        u32                     flags;
 
-#ifdef CONFIG_TIMER_STATS
-       int                     start_pid;
-       void                    *start_site;
-       char                    start_comm[16];
-#endif
 #ifdef CONFIG_LOCKDEP
        struct lockdep_map      lockdep_map;
 #endif
@@ -197,46 +192,6 @@ extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
  */
 #define NEXT_TIMER_MAX_DELTA   ((1UL << 30) - 1)
 
-/*
- * Timer-statistics info:
- */
-#ifdef CONFIG_TIMER_STATS
-
-extern int timer_stats_active;
-
-extern void init_timer_stats(void);
-
-extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
-                                    void *timerf, char *comm, u32 flags);
-
-extern void __timer_stats_timer_set_start_info(struct timer_list *timer,
-                                              void *addr);
-
-static inline void timer_stats_timer_set_start_info(struct timer_list *timer)
-{
-       if (likely(!timer_stats_active))
-               return;
-       __timer_stats_timer_set_start_info(timer, __builtin_return_address(0));
-}
-
-static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
-{
-       timer->start_site = NULL;
-}
-#else
-static inline void init_timer_stats(void)
-{
-}
-
-static inline void timer_stats_timer_set_start_info(struct timer_list *timer)
-{
-}
-
-static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
-{
-}
-#endif
-
 extern void add_timer(struct timer_list *timer);
 
 extern int try_to_del_timer_sync(struct timer_list *timer);
index 66204007d7ac32f1f32068c1f46fc1a033e6953f..5209b5ed2a6476dfa5fe6b779f433a795f084ed7 100644 (file)
@@ -56,7 +56,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
 
 static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
                                          struct virtio_net_hdr *hdr,
-                                         bool little_endian)
+                                         bool little_endian,
+                                         bool has_data_valid)
 {
        memset(hdr, 0, sizeof(*hdr));   /* no info leak */
 
@@ -91,7 +92,8 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
                                skb_checksum_start_offset(skb));
                hdr->csum_offset = __cpu_to_virtio16(little_endian,
                                skb->csum_offset);
-       } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+       } else if (has_data_valid &&
+                  skb->ip_summed == CHECKSUM_UNNECESSARY) {
                hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
        } /* else everything is zero */
 
index aa9bfea8804a0c74d2d6e0be1bac5eb4f3f119c0..0681fe25abeb423124f5561421aef56864cc8d69 100644 (file)
@@ -58,27 +58,28 @@ static inline void vtime_task_switch(struct task_struct *prev)
 
 extern void vtime_account_system(struct task_struct *tsk);
 extern void vtime_account_idle(struct task_struct *tsk);
-extern void vtime_account_user(struct task_struct *tsk);
 
 #else /* !CONFIG_VIRT_CPU_ACCOUNTING */
 
 static inline void vtime_task_switch(struct task_struct *prev) { }
 static inline void vtime_account_system(struct task_struct *tsk) { }
-static inline void vtime_account_user(struct task_struct *tsk) { }
 #endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 extern void arch_vtime_task_switch(struct task_struct *tsk);
+extern void vtime_account_user(struct task_struct *tsk);
 extern void vtime_user_enter(struct task_struct *tsk);
 
 static inline void vtime_user_exit(struct task_struct *tsk)
 {
        vtime_account_user(tsk);
 }
+
 extern void vtime_guest_enter(struct task_struct *tsk);
 extern void vtime_guest_exit(struct task_struct *tsk);
 extern void vtime_init_idle(struct task_struct *tsk, int cpu);
 #else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN  */
+static inline void vtime_account_user(struct task_struct *tsk) { }
 static inline void vtime_user_enter(struct task_struct *tsk) { }
 static inline void vtime_user_exit(struct task_struct *tsk) { }
 static inline void vtime_guest_enter(struct task_struct *tsk) { }
@@ -93,9 +94,11 @@ static inline void vtime_account_irq_exit(struct task_struct *tsk)
        /* On hard|softirq exit we always account to hard|softirq cputime */
        vtime_account_system(tsk);
 }
+extern void vtime_flush(struct task_struct *tsk);
 #else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
 static inline void vtime_account_irq_exit(struct task_struct *tsk) { }
+static inline void vtime_flush(struct task_struct *tsk) { }
 #endif
 
 
index 7b0066814fa001d318f29b8c747ad2ec38e194bc..5dd9a76822273ec092ea434f0489828155e834a6 100644 (file)
@@ -51,10 +51,10 @@ struct ww_mutex {
 };
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class) \
-               , .ww_class = &ww_class
+# define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) \
+               , .ww_class = class
 #else
-# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class)
+# define __WW_CLASS_MUTEX_INITIALIZER(lockname, class)
 #endif
 
 #define __WW_CLASS_INITIALIZER(ww_class) \
@@ -63,7 +63,7 @@ struct ww_mutex {
                , .mutex_name = #ww_class "_mutex" }
 
 #define __WW_MUTEX_INITIALIZER(lockname, class) \
-               { .base = { \__MUTEX_INITIALIZER(lockname) } \
+               { .base =  __MUTEX_INITIALIZER(lockname.base) \
                __WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
 
 #define DEFINE_WW_CLASS(classname) \
@@ -186,11 +186,6 @@ static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
 #endif
 }
 
-extern int __must_check __ww_mutex_lock(struct ww_mutex *lock,
-                                       struct ww_acquire_ctx *ctx);
-extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock,
-                                                     struct ww_acquire_ctx *ctx);
-
 /**
  * ww_mutex_lock - acquire the w/w mutex
  * @lock: the mutex to be acquired
@@ -220,14 +215,7 @@ extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock,
  *
  * A mutex acquired with this function must be released with ww_mutex_unlock.
  */
-static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
-{
-       if (ctx)
-               return __ww_mutex_lock(lock, ctx);
-
-       mutex_lock(&lock->base);
-       return 0;
-}
+extern int /* __must_check */ ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx);
 
 /**
  * ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible
@@ -259,14 +247,8 @@ static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ct
  *
  * A mutex acquired with this function must be released with ww_mutex_unlock.
  */
-static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
-                                                          struct ww_acquire_ctx *ctx)
-{
-       if (ctx)
-               return __ww_mutex_lock_interruptible(lock, ctx);
-       else
-               return mutex_lock_interruptible(&lock->base);
-}
+extern int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
+                                                   struct ww_acquire_ctx *ctx);
 
 /**
  * ww_mutex_lock_slow - slowpath acquiring of the w/w mutex
index 554671c81f4a39a8a773a01a318af0b1c70b5617..90708f68cc024e18ddb9be2b55e28aaaf6425b92 100644 (file)
@@ -987,7 +987,7 @@ static inline void hci_conn_drop(struct hci_conn *conn)
 static inline void hci_dev_put(struct hci_dev *d)
 {
        BT_DBG("%s orig refcnt %d", d->name,
-              atomic_read(&d->dev.kobj.kref.refcount));
+              kref_read(&d->dev.kobj.kref));
 
        put_device(&d->dev);
 }
@@ -995,7 +995,7 @@ static inline void hci_dev_put(struct hci_dev *d)
 static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
 {
        BT_DBG("%s orig refcnt %d", d->name,
-              atomic_read(&d->dev.kobj.kref.refcount));
+              kref_read(&d->dev.kobj.kref));
 
        get_device(&d->dev);
        return d;
index 3ebb168b9afc68ad639b5d32f6182a845c83d759..a34b141f125f0032662f147b598c9fef4fb4bcef 100644 (file)
@@ -309,6 +309,10 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
        }
 
        for (opt_iter = 6; opt_iter < opt_len;) {
+               if (opt_iter + 1 == opt_len) {
+                       err_offset = opt_iter;
+                       goto out;
+               }
                tag_len = opt[opt_iter + 1];
                if ((tag_len == 0) || (tag_len > (opt_len - opt_iter))) {
                        err_offset = opt_iter + 1;
index 487e5739166415625465fa13e0c748dcf1b894ed..dbf0abba33b8da21be05abf6e719f69542da80fc 100644 (file)
@@ -776,6 +776,11 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
 {
        u32 hash;
 
+       /* @flowlabel may include more than a flow label, eg, the traffic class.
+        * Here we want only the flow label value.
+        */
+       flowlabel &= IPV6_FLOWLABEL_MASK;
+
        if (flowlabel ||
            net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
            (!autolabel &&
@@ -871,7 +876,7 @@ int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
  *     upper-layer output functions
  */
 int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
-            struct ipv6_txoptions *opt, int tclass);
+            __u32 mark, struct ipv6_txoptions *opt, int tclass);
 
 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
 
index d4c1c75b886244f76f9539c0709bb72be7974578..0388b9c5f5e2c7257cc0eb19be469974becab5ed 100644 (file)
@@ -44,6 +44,8 @@ struct lwtunnel_encap_ops {
        int (*get_encap_size)(struct lwtunnel_state *lwtstate);
        int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b);
        int (*xmit)(struct sk_buff *skb);
+
+       struct module *owner;
 };
 
 #ifdef CONFIG_LWTUNNEL
@@ -105,6 +107,8 @@ int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
                           unsigned int num);
 int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
                           unsigned int num);
+int lwtunnel_valid_encap_type(u16 encap_type);
+int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len);
 int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
                         struct nlattr *encap,
                         unsigned int family, const void *cfg,
@@ -168,6 +172,18 @@ static inline int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
        return -EOPNOTSUPP;
 }
 
+static inline int lwtunnel_valid_encap_type(u16 encap_type)
+{
+       return -EOPNOTSUPP;
+}
+static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len)
+{
+       /* return 0 since we are not walking attr looking for
+        * RTA_ENCAP_TYPE attribute on nexthops.
+        */
+       return 0;
+}
+
 static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
                                       struct nlattr *encap,
                                       unsigned int family, const void *cfg,
index 924325c46aab2fafc10b3b92bccafd3e00c60c71..7dfdb517f0be826018cf649bb2e410e44d8f9c36 100644 (file)
@@ -207,9 +207,9 @@ struct nft_set_iter {
        unsigned int    skip;
        int             err;
        int             (*fn)(const struct nft_ctx *ctx,
-                             const struct nft_set *set,
+                             struct nft_set *set,
                              const struct nft_set_iter *iter,
-                             const struct nft_set_elem *elem);
+                             struct nft_set_elem *elem);
 };
 
 /**
@@ -301,7 +301,7 @@ struct nft_set_ops {
        void                            (*remove)(const struct nft_set *set,
                                                  const struct nft_set_elem *elem);
        void                            (*walk)(const struct nft_ctx *ctx,
-                                               const struct nft_set *set,
+                                               struct nft_set *set,
                                                struct nft_set_iter *iter);
 
        unsigned int                    (*privsize)(const struct nlattr * const nla[]);
index cbedda077db2ca4bf4aef6e9939b53ee96079920..5ceb2205e4e3ed93461ed4a3956b227f99ac9494 100644 (file)
@@ -9,6 +9,12 @@ struct nft_fib {
 
 extern const struct nla_policy nft_fib_policy[];
 
+static inline bool
+nft_fib_is_loopback(const struct sk_buff *skb, const struct net_device *in)
+{
+       return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
+}
+
 int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr);
 int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
                 const struct nlattr * const tb[]);
index f0e867f58722f344494c98cbdbe49d7ec875099a..c4f5e6fca17cf4e0029080410202cb66ce0fad37 100644 (file)
@@ -2006,7 +2006,9 @@ void sk_reset_timer(struct sock *sk, struct timer_list *timer,
 void sk_stop_timer(struct sock *sk, struct timer_list *timer);
 
 int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
-                       unsigned int flags);
+                       unsigned int flags,
+                       void (*destructor)(struct sock *sk,
+                                          struct sk_buff *skb));
 int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 
index 958a24d8fae794547c486b5b025f3815c96f82e7..b567e4452a4733de98b720e2c0d9060f21cc92e2 100644 (file)
@@ -352,6 +352,20 @@ static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
        }
 }
 
+static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
+{
+       if (mtu >= 4096)
+               return IB_MTU_4096;
+       else if (mtu >= 2048)
+               return IB_MTU_2048;
+       else if (mtu >= 1024)
+               return IB_MTU_1024;
+       else if (mtu >= 512)
+               return IB_MTU_512;
+       else
+               return IB_MTU_256;
+}
+
 enum ib_port_state {
        IB_PORT_NOP             = 0,
        IB_PORT_DOWN            = 1,
index 6902c2a8bd23c2e43f9b8bd7b688c0cc4a6a3ca4..4b6b489a8d7c2defaf51e9ecb121c91901b9cac8 100644 (file)
@@ -55,17 +55,17 @@ struct mcip_cmd {
 
 struct mcip_bcr {
 #ifdef CONFIG_CPU_BIG_ENDIAN
-               unsigned int pad3:8,
-                            idu:1, llm:1, num_cores:6,
-                            iocoh:1,  gfrc:1, dbg:1, pad2:1,
-                            msg:1, sem:1, ipi:1, pad:1,
+               unsigned int pad4:6, pw_dom:1, pad3:1,
+                            idu:1, pad2:1, num_cores:6,
+                            pad:1,  gfrc:1, dbg:1, pw:1,
+                            msg:1, sem:1, ipi:1, slv:1,
                             ver:8;
 #else
                unsigned int ver:8,
-                            pad:1, ipi:1, sem:1, msg:1,
-                            pad2:1, dbg:1, gfrc:1, iocoh:1,
-                            num_cores:6, llm:1, idu:1,
-                            pad3:8;
+                            slv:1, ipi:1, sem:1, msg:1,
+                            pw:1, dbg:1, gfrc:1, pad:1,
+                            num_cores:6, pad2:1, idu:1,
+                            pad3:1, pw_dom:1, pad4:6;
 #endif
 };
 
index dc10c52e0e9199e7e878a87a714569106a955df3..393362bdb86041bc90f5bfac9101af4fccb642f4 100644 (file)
@@ -81,6 +81,7 @@
 #define                        AT91_DDRSDRC_LPCB_POWER_DOWN            2
 #define                        AT91_DDRSDRC_LPCB_DEEP_POWER_DOWN       3
 #define                AT91_DDRSDRC_CLKFR      (1 << 2)        /* Clock Frozen */
+#define                AT91_DDRSDRC_LPDDR2_PWOFF       (1 << 3)        /* LPDDR Power Off */
 #define                AT91_DDRSDRC_PASR       (7 << 4)        /* Partial Array Self Refresh */
 #define                AT91_DDRSDRC_TCSR       (3 << 8)        /* Temperature Compensated Self Refresh */
 #define                AT91_DDRSDRC_DS         (3 << 10)       /* Drive Strength */
@@ -96,7 +97,9 @@
 #define                        AT91_DDRSDRC_MD_SDR             0
 #define                        AT91_DDRSDRC_MD_LOW_POWER_SDR   1
 #define                        AT91_DDRSDRC_MD_LOW_POWER_DDR   3
+#define                        AT91_DDRSDRC_MD_LPDDR3          5
 #define                        AT91_DDRSDRC_MD_DDR2            6       /* [SAM9 Only] */
+#define                        AT91_DDRSDRC_MD_LPDDR2          7
 #define                AT91_DDRSDRC_DBW        (1 << 4)                /* Data Bus Width */
 #define                        AT91_DDRSDRC_DBW_32BITS         (0 <<  4)
 #define                        AT91_DDRSDRC_DBW_16BITS         (1 <<  4)
index 43edf82e54fffce7b3d8bfed8c832aebfc60c79b..da854fb4530f1f75e9f46a8ad57ec0251217fd7b 100644 (file)
@@ -538,6 +538,7 @@ struct se_node_acl {
        char                    initiatorname[TRANSPORT_IQN_LEN];
        /* Used to signal demo mode created ACL, disabled by default */
        bool                    dynamic_node_acl;
+       bool                    dynamic_stop;
        u32                     queue_depth;
        u32                     acl_index;
        enum target_prot_type   saved_prot_type;
index 9d4f9b3a2b7b26ddedbd5b61d2add58c9a94cdab..e3facb356838c912e86e1e4c7a9f25c2f10555c2 100644 (file)
@@ -385,11 +385,11 @@ TRACE_EVENT(rcu_quiescent_state_report,
 
 /*
  * Tracepoint for quiescent states detected by force_quiescent_state().
- * These trace events include the type of RCU, the grace-period number
- * that was blocked by the CPU, the CPU itself, and the type of quiescent
- * state, which can be "dti" for dyntick-idle mode, "ofl" for CPU offline,
- * or "kick" when kicking a CPU that has been in dyntick-idle mode for
- * too long.
+ * These trace events include the type of RCU, the grace-period number that
+ * was blocked by the CPU, the CPU itself, and the type of quiescent state,
+ * which can be "dti" for dyntick-idle mode, "ofl" for CPU offline, "kick"
+ * when kicking a CPU that has been in dyntick-idle mode for too long, or
+ * "rqc" if the CPU got a quiescent state via its rcu_qs_ctr.
  */
 TRACE_EVENT(rcu_fqs,
 
index 1448637616d648475d5ccd28945df6fce617bca0..1bca99dbb98f868ee2b066c5b80f55a59ecee086 100644 (file)
@@ -269,17 +269,17 @@ DEFINE_EVENT(hrtimer_class, hrtimer_cancel,
 TRACE_EVENT(itimer_state,
 
        TP_PROTO(int which, const struct itimerval *const value,
-                cputime_t expires),
+                unsigned long long expires),
 
        TP_ARGS(which, value, expires),
 
        TP_STRUCT__entry(
-               __field(        int,            which           )
-               __field(        cputime_t,      expires         )
-               __field(        long,           value_sec       )
-               __field(        long,           value_usec      )
-               __field(        long,           interval_sec    )
-               __field(        long,           interval_usec   )
+               __field(        int,                    which           )
+               __field(        unsigned long long,     expires         )
+               __field(        long,                   value_sec       )
+               __field(        long,                   value_usec      )
+               __field(        long,                   interval_sec    )
+               __field(        long,                   interval_usec   )
        ),
 
        TP_fast_assign(
@@ -292,7 +292,7 @@ TRACE_EVENT(itimer_state,
        ),
 
        TP_printk("which=%d expires=%llu it_value=%ld.%ld it_interval=%ld.%ld",
-                 __entry->which, (unsigned long long)__entry->expires,
+                 __entry->which, __entry->expires,
                  __entry->value_sec, __entry->value_usec,
                  __entry->interval_sec, __entry->interval_usec)
 );
@@ -305,14 +305,14 @@ TRACE_EVENT(itimer_state,
  */
 TRACE_EVENT(itimer_expire,
 
-       TP_PROTO(int which, struct pid *pid, cputime_t now),
+       TP_PROTO(int which, struct pid *pid, unsigned long long now),
 
        TP_ARGS(which, pid, now),
 
        TP_STRUCT__entry(
-               __field( int ,          which   )
-               __field( pid_t,         pid     )
-               __field( cputime_t,     now     )
+               __field( int ,                  which   )
+               __field( pid_t,                 pid     )
+               __field( unsigned long long,    now     )
        ),
 
        TP_fast_assign(
@@ -322,7 +322,7 @@ TRACE_EVENT(itimer_expire,
        ),
 
        TP_printk("which=%d pid=%d now=%llu", __entry->which,
-                 (int) __entry->pid, (unsigned long long)__entry->now)
+                 (int) __entry->pid, __entry->now)
 );
 
 #ifdef CONFIG_NO_HZ_COMMON
index 0eb0e87dbe9f511672102f2123129328288a9159..d2b0ac799d03c925a6eec2b49bdb14525331687c 100644 (file)
@@ -116,6 +116,12 @@ enum bpf_attach_type {
 
 #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
 
+/* If BPF_F_ALLOW_OVERRIDE flag is used in BPF_PROG_ATTACH command
+ * to the given target_fd cgroup the descendent cgroup will be able to
+ * override effective bpf program that was inherited from this cgroup
+ */
+#define BPF_F_ALLOW_OVERRIDE   (1U << 0)
+
 #define BPF_PSEUDO_MAP_FD      1
 
 /* flags for BPF_MAP_UPDATE_ELEM command */
@@ -171,6 +177,7 @@ union bpf_attr {
                __u32           target_fd;      /* container object to attach to */
                __u32           attach_bpf_fd;  /* eBPF program to attach */
                __u32           attach_type;
+               __u32           attach_flags;
        };
 } __attribute__((aligned(8)));
 
index 3cbc327801d6dc625f3f7786730fb07d065ac760..c451eec42a83101a6eea0219788165600d128e49 100644 (file)
@@ -1665,14 +1665,15 @@ static inline void cec_msg_report_current_latency(struct cec_msg *msg,
                                                  __u8 audio_out_compensated,
                                                  __u8 audio_out_delay)
 {
-       msg->len = 7;
+       msg->len = 6;
        msg->msg[0] |= 0xf; /* broadcast */
        msg->msg[1] = CEC_MSG_REPORT_CURRENT_LATENCY;
        msg->msg[2] = phys_addr >> 8;
        msg->msg[3] = phys_addr & 0xff;
        msg->msg[4] = video_latency;
        msg->msg[5] = (low_latency_mode << 2) | audio_out_compensated;
-       msg->msg[6] = audio_out_delay;
+       if (audio_out_compensated == 3)
+               msg->msg[msg->len++] = audio_out_delay;
 }
 
 static inline void cec_ops_report_current_latency(const struct cec_msg *msg,
@@ -1686,7 +1687,10 @@ static inline void cec_ops_report_current_latency(const struct cec_msg *msg,
        *video_latency = msg->msg[4];
        *low_latency_mode = (msg->msg[5] >> 2) & 1;
        *audio_out_compensated = msg->msg[5] & 3;
-       *audio_out_delay = msg->msg[6];
+       if (*audio_out_compensated == 3 && msg->len >= 7)
+               *audio_out_delay = msg->msg[6];
+       else
+               *audio_out_delay = 0;
 }
 
 static inline void cec_msg_request_current_latency(struct cec_msg *msg,
index f0db7788f887b9947e0e1aa78d48a9980e80bddf..3dc91a46e8b8da0b243a12a168bbf205e5a87916 100644 (file)
@@ -1384,6 +1384,8 @@ enum ethtool_link_mode_bit_indices {
        ETHTOOL_LINK_MODE_10000baseLR_Full_BIT  = 44,
        ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45,
        ETHTOOL_LINK_MODE_10000baseER_Full_BIT  = 46,
+       ETHTOOL_LINK_MODE_2500baseT_Full_BIT    = 47,
+       ETHTOOL_LINK_MODE_5000baseT_Full_BIT    = 48,
 
 
        /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
@@ -1393,7 +1395,7 @@ enum ethtool_link_mode_bit_indices {
         */
 
        __ETHTOOL_LINK_MODE_LAST
-         = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
+         = ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
 };
 
 #define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name)     \
index 85ddb74fcd1c8e17d027be5cdb9310048c2d50d8..b23c1914a18217a3e1e185dc0bd47211c98f20a6 100644 (file)
@@ -9,9 +9,8 @@
 
 #include <linux/types.h>
 #include <linux/socket.h>
-#ifndef __KERNEL__
-#include <netinet/in.h>
-#endif
+#include <linux/in.h>
+#include <linux/in6.h>
 
 #define IPPROTO_L2TP           115
 
@@ -31,7 +30,7 @@ struct sockaddr_l2tpip {
        __u32           l2tp_conn_id;   /* Connection ID of tunnel */
 
        /* Pad to size of `struct sockaddr'. */
-       unsigned char   __pad[sizeof(struct sockaddr) -
+       unsigned char   __pad[__SOCK_SIZE__ -
                              sizeof(__kernel_sa_family_t) -
                              sizeof(__be16) - sizeof(struct in_addr) -
                              sizeof(__u32)];
index 8be21e02387db67010fb26b50abb59201b9c0e5c..d0b5fa91ff5493fd4af4dbed049102a8c65debf3 100644 (file)
@@ -9,4 +9,6 @@
 #define NF_LOG_MACDECODE       0x20    /* Decode MAC header */
 #define NF_LOG_MASK            0x2f
 
+#define NF_LOG_PREFIXLEN       128
+
 #endif /* _NETFILTER_NF_LOG_H */
index 881d49e94569648d2f735e81b34fa567627f3019..e3f27e09eb2be460ebfea6a3b8f2d1ffb38d3010 100644 (file)
@@ -235,7 +235,7 @@ enum nft_rule_compat_flags {
 /**
  * enum nft_rule_compat_attributes - nf_tables rule compat attributes
  *
- * @NFTA_RULE_COMPAT_PROTO: numerice value of handled protocol (NLA_U32)
+ * @NFTA_RULE_COMPAT_PROTO: numeric value of handled protocol (NLA_U32)
  * @NFTA_RULE_COMPAT_FLAGS: bitmask of enum nft_rule_compat_flags (NLA_U32)
  */
 enum nft_rule_compat_attributes {
@@ -499,7 +499,7 @@ enum nft_bitwise_attributes {
  * enum nft_byteorder_ops - nf_tables byteorder operators
  *
  * @NFT_BYTEORDER_NTOH: network to host operator
- * @NFT_BYTEORDER_HTON: host to network opertaor
+ * @NFT_BYTEORDER_HTON: host to network operator
  */
 enum nft_byteorder_ops {
        NFT_BYTEORDER_NTOH,
index c396a8052f73cd91c12b676733f93b68416cd50a..052799e4d751c805de01bc8ed47c3d0a1ecde936 100644 (file)
@@ -23,14 +23,12 @@ struct ipv6_sr_hdr {
        __u8    type;
        __u8    segments_left;
        __u8    first_segment;
-       __u8    flag_1;
-       __u8    flag_2;
-       __u8    reserved;
+       __u8    flags;
+       __u16   reserved;
 
        struct in6_addr segments[0];
 };
 
-#define SR6_FLAG1_CLEANUP      (1 << 7)
 #define SR6_FLAG1_PROTECTED    (1 << 6)
 #define SR6_FLAG1_OAM          (1 << 5)
 #define SR6_FLAG1_ALERT                (1 << 4)
@@ -42,8 +40,7 @@ struct ipv6_sr_hdr {
 #define SR6_TLV_PADDING                4
 #define SR6_TLV_HMAC           5
 
-#define sr_has_cleanup(srh) ((srh)->flag_1 & SR6_FLAG1_CLEANUP)
-#define sr_has_hmac(srh) ((srh)->flag_1 & SR6_FLAG1_HMAC)
+#define sr_has_hmac(srh) ((srh)->flags & SR6_FLAG1_HMAC)
 
 struct sr6_tlv {
        __u8 type;
index 46e8a2e369f907716337db007dca0aba1f5f7477..45184a2ef66c219c893769dd73bc5c3485a5c814 100644 (file)
@@ -362,8 +362,8 @@ enum v4l2_quantization {
        /*
         * The default for R'G'B' quantization is always full range, except
         * for the BT2020 colorspace. For Y'CbCr the quantization is always
-        * limited range, except for COLORSPACE_JPEG, SRGB, ADOBERGB,
-        * XV601 or XV709: those are full range.
+        * limited range, except for COLORSPACE_JPEG, XV601 or XV709: those
+        * are full range.
         */
        V4L2_QUANTIZATION_DEFAULT     = 0,
        V4L2_QUANTIZATION_FULL_RANGE  = 1,
@@ -379,8 +379,7 @@ enum v4l2_quantization {
        (((is_rgb_or_hsv) && (colsp) == V4L2_COLORSPACE_BT2020) ? \
         V4L2_QUANTIZATION_LIM_RANGE : \
         (((is_rgb_or_hsv) || (ycbcr_enc) == V4L2_YCBCR_ENC_XV601 || \
-         (ycbcr_enc) == V4L2_YCBCR_ENC_XV709 || (colsp) == V4L2_COLORSPACE_JPEG) || \
-         (colsp) == V4L2_COLORSPACE_ADOBERGB || (colsp) == V4L2_COLORSPACE_SRGB ? \
+         (ycbcr_enc) == V4L2_YCBCR_ENC_XV709 || (colsp) == V4L2_COLORSPACE_JPEG) ? \
         V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE))
 
 enum v4l2_priority {
index 82bdf5626859989085f831ee3a3a70c1b26ff066..bb68cb1b04ed3893faccac5eed815f50521f3813 100644 (file)
@@ -16,3 +16,4 @@ header-y += nes-abi.h
 header-y += ocrdma-abi.h
 header-y += hns-abi.h
 header-y += vmw_pvrdma-abi.h
+header-y += qedr-abi.h
index 48a19bda071b8db5396991dcd97cd9e0f3872ecf..d24eee12128fc5cb7d1f5612e85a7fb9b194520f 100644 (file)
@@ -30,7 +30,7 @@
  * SOFTWARE.
  */
 #ifndef CXGB3_ABI_USER_H
-#define CXBG3_ABI_USER_H
+#define CXGB3_ABI_USER_H
 
 #include <linux/types.h>
 
index dfdfe4e92d3118c0ef42b5c5d64f240eba50dee5..f4f87cff6dc6c5fc9f1a91e32c9e8d3a601f10c8 100644 (file)
@@ -37,7 +37,6 @@
 #define IB_USER_VERBS_H
 
 #include <linux/types.h>
-#include <rdma/ib_verbs.h>
 
 /*
  * Increment this value if any changes that break userspace ABI
@@ -548,11 +547,17 @@ enum {
 };
 
 enum {
-       IB_USER_LEGACY_LAST_QP_ATTR_MASK = IB_QP_DEST_QPN
+       /*
+        * This value is equal to IB_QP_DEST_QPN.
+        */
+       IB_USER_LEGACY_LAST_QP_ATTR_MASK = 1ULL << 20,
 };
 
 enum {
-       IB_USER_LAST_QP_ATTR_MASK = IB_QP_RATE_LIMIT
+       /*
+        * This value is equal to IB_QP_RATE_LIMIT.
+        */
+       IB_USER_LAST_QP_ATTR_MASK = 1ULL << 25,
 };
 
 struct ib_uverbs_ex_create_qp {
index e1a937348a3ed2bb3a76820e1ffa6a542f6aa9fb..2655abb8f310318b39565a08dea727186747daf5 100644 (file)
@@ -529,7 +529,6 @@ config SRCU
 config TASKS_RCU
        bool
        default n
-       depends on !UML
        select SRCU
        help
          This option enables a task-based RCU implementation that uses
@@ -781,19 +780,6 @@ config RCU_NOCB_CPU_ALL
 
 endchoice
 
-config RCU_EXPEDITE_BOOT
-       bool
-       default n
-       help
-         This option enables expedited grace periods at boot time,
-         as if rcu_expedite_gp() had been invoked early in boot.
-         The corresponding rcu_unexpedite_gp() is invoked from
-         rcu_end_inkernel_boot(), which is intended to be invoked
-         at the end of the kernel-only boot sequence, just before
-         init is exec'ed.
-
-         Accept the default if unsure.
-
 endmenu # "RCU Subsystem"
 
 config BUILD_BIN2C
@@ -1987,6 +1973,10 @@ config MODVERSIONS
          make them incompatible with the kernel you are running.  If
          unsure, say N.
 
+config MODULE_REL_CRCS
+       bool
+       depends on MODVERSIONS
+
 config MODULE_SRCVERSION_ALL
        bool "Source checksum for all modules"
        help
index b0c9d6facef9a5aced55d1443b40029a660011e8..6ced14a3df12a8ae55b3c16bd03525de6a7c20ea 100644 (file)
@@ -625,7 +625,6 @@ asmlinkage __visible void __init start_kernel(void)
        numa_policy_init();
        if (late_time_init)
                late_time_init();
-       sched_clock_init();
        calibrate_delay();
        pidmap_init();
        anon_vma_init();
@@ -663,7 +662,6 @@ asmlinkage __visible void __init start_kernel(void)
        sfi_init_late();
 
        if (efi_enabled(EFI_RUNTIME_SERVICES)) {
-               efi_late_init();
                efi_free_boot_services();
        }
 
index fe41a63efed63b54831b2546df1553288052b97c..5606341e9efd5510c9b8ede9eb01b12b9965e6c9 100644 (file)
@@ -23,9 +23,7 @@ int version_string(LINUX_VERSION_CODE);
 #endif
 
 struct uts_namespace init_uts_ns = {
-       .kref = {
-               .refcount       = ATOMIC_INIT(2),
-       },
+       .kref = KREF_INIT(2),
        .name = {
                .sysname        = UTS_SYSNAME,
                .nodename       = UTS_NODENAME,
index 74963d192c5d96ce3674f0adbf554c95af4867ec..ca9cb55b585599bcf78c78adc7117e70f23d6c33 100644 (file)
@@ -453,8 +453,8 @@ static void fill_ac(acct_t *ac)
        spin_lock_irq(&current->sighand->siglock);
        tty = current->signal->tty;     /* Safe as we hold the siglock */
        ac->ac_tty = tty ? old_encode_dev(tty_devnum(tty)) : 0;
-       ac->ac_utime = encode_comp_t(jiffies_to_AHZ(cputime_to_jiffies(pacct->ac_utime)));
-       ac->ac_stime = encode_comp_t(jiffies_to_AHZ(cputime_to_jiffies(pacct->ac_stime)));
+       ac->ac_utime = encode_comp_t(nsec_to_AHZ(pacct->ac_utime));
+       ac->ac_stime = encode_comp_t(nsec_to_AHZ(pacct->ac_stime));
        ac->ac_flag = pacct->ac_flag;
        ac->ac_mem = encode_comp_t(pacct->ac_mem);
        ac->ac_minflt = encode_comp_t(pacct->ac_minflt);
@@ -530,7 +530,7 @@ out:
 void acct_collect(long exitcode, int group_dead)
 {
        struct pacct_struct *pacct = &current->signal->pacct;
-       cputime_t utime, stime;
+       u64 utime, stime;
        unsigned long vsize = 0;
 
        if (group_dead && current->mm) {
@@ -559,6 +559,7 @@ void acct_collect(long exitcode, int group_dead)
                pacct->ac_flag |= ACORE;
        if (current->flags & PF_SIGNALED)
                pacct->ac_flag |= AXSIG;
+
        task_cputime(current, &utime, &stime);
        pacct->ac_utime += utime;
        pacct->ac_stime += stime;
index 229a5d5df9770fc66774bf5defea359873946d01..3d55d95dcf49e600fe8f99c92eb57e7cde043208 100644 (file)
@@ -11,7 +11,6 @@
  */
 #include <linux/bpf.h>
 #include <linux/err.h>
-#include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/filter.h>
@@ -74,14 +73,10 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
        if (array_size >= U32_MAX - PAGE_SIZE)
                return ERR_PTR(-ENOMEM);
 
-
        /* allocate all map elements and zero-initialize them */
-       array = kzalloc(array_size, GFP_USER | __GFP_NOWARN);
-       if (!array) {
-               array = vzalloc(array_size);
-               if (!array)
-                       return ERR_PTR(-ENOMEM);
-       }
+       array = bpf_map_area_alloc(array_size);
+       if (!array)
+               return ERR_PTR(-ENOMEM);
 
        /* copy mandatory map attributes */
        array->map.map_type = attr->map_type;
@@ -97,7 +92,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 
        if (array_size >= U32_MAX - PAGE_SIZE ||
            elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
-               kvfree(array);
+               bpf_map_area_free(array);
                return ERR_PTR(-ENOMEM);
        }
 out:
@@ -262,7 +257,7 @@ static void array_map_free(struct bpf_map *map)
        if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
                bpf_array_free_percpu(array);
 
-       kvfree(array);
+       bpf_map_area_free(array);
 }
 
 static const struct bpf_map_ops array_ops = {
@@ -319,7 +314,8 @@ static void fd_array_map_free(struct bpf_map *map)
        /* make sure it's empty */
        for (i = 0; i < array->map.max_entries; i++)
                BUG_ON(array->ptrs[i] != NULL);
-       kvfree(array);
+
+       bpf_map_area_free(array);
 }
 
 static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
index a515f7b007c61245e7f5397a9d9ab7390a6bd047..da0f53690295610ff5ae447ed98fb6e70c99c4f1 100644 (file)
@@ -52,6 +52,7 @@ void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent)
                e = rcu_dereference_protected(parent->bpf.effective[type],
                                              lockdep_is_held(&cgroup_mutex));
                rcu_assign_pointer(cgrp->bpf.effective[type], e);
+               cgrp->bpf.disallow_override[type] = parent->bpf.disallow_override[type];
        }
 }
 
@@ -82,30 +83,63 @@ void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent)
  *
  * Must be called with cgroup_mutex held.
  */
-void __cgroup_bpf_update(struct cgroup *cgrp,
-                        struct cgroup *parent,
-                        struct bpf_prog *prog,
-                        enum bpf_attach_type type)
+int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
+                       struct bpf_prog *prog, enum bpf_attach_type type,
+                       bool new_overridable)
 {
-       struct bpf_prog *old_prog, *effective;
+       struct bpf_prog *old_prog, *effective = NULL;
        struct cgroup_subsys_state *pos;
+       bool overridable = true;
 
-       old_prog = xchg(cgrp->bpf.prog + type, prog);
+       if (parent) {
+               overridable = !parent->bpf.disallow_override[type];
+               effective = rcu_dereference_protected(parent->bpf.effective[type],
+                                                     lockdep_is_held(&cgroup_mutex));
+       }
+
+       if (prog && effective && !overridable)
+               /* if parent has non-overridable prog attached, disallow
+                * attaching new programs to descendent cgroup
+                */
+               return -EPERM;
+
+       if (prog && effective && overridable != new_overridable)
+               /* if parent has overridable prog attached, only
+                * allow overridable programs in descendent cgroup
+                */
+               return -EPERM;
 
-       effective = (!prog && parent) ?
-               rcu_dereference_protected(parent->bpf.effective[type],
-                                         lockdep_is_held(&cgroup_mutex)) :
-               prog;
+       old_prog = cgrp->bpf.prog[type];
+
+       if (prog) {
+               overridable = new_overridable;
+               effective = prog;
+               if (old_prog &&
+                   cgrp->bpf.disallow_override[type] == new_overridable)
+                       /* disallow attaching non-overridable on top
+                        * of existing overridable in this cgroup
+                        * and vice versa
+                        */
+                       return -EPERM;
+       }
+
+       if (!prog && !old_prog)
+               /* report error when trying to detach and nothing is attached */
+               return -ENOENT;
+
+       cgrp->bpf.prog[type] = prog;
 
        css_for_each_descendant_pre(pos, &cgrp->self) {
                struct cgroup *desc = container_of(pos, struct cgroup, self);
 
                /* skip the subtree if the descendant has its own program */
-               if (desc->bpf.prog[type] && desc != cgrp)
+               if (desc->bpf.prog[type] && desc != cgrp) {
                        pos = css_rightmost_descendant(pos);
-               else
+               } else {
                        rcu_assign_pointer(desc->bpf.effective[type],
                                           effective);
+                       desc->bpf.disallow_override[type] = !overridable;
+               }
        }
 
        if (prog)
@@ -115,6 +149,7 @@ void __cgroup_bpf_update(struct cgroup *cgrp,
                bpf_prog_put(old_prog);
                static_branch_dec(&cgroup_bpf_enabled_key);
        }
+       return 0;
 }
 
 /**
index 3f2bb58952d8dfa4a2e082f9e6f9d277e19f0577..a753bbe7df0a1747658ca75325e28880173613f8 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/bpf.h>
 #include <linux/jhash.h>
 #include <linux/filter.h>
-#include <linux/vmalloc.h>
 #include "percpu_freelist.h"
 #include "bpf_lru_list.h"
 
@@ -103,7 +102,7 @@ static void htab_free_elems(struct bpf_htab *htab)
                free_percpu(pptr);
        }
 free_elems:
-       vfree(htab->elems);
+       bpf_map_area_free(htab->elems);
 }
 
 static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
@@ -125,7 +124,8 @@ static int prealloc_init(struct bpf_htab *htab)
 {
        int err = -ENOMEM, i;
 
-       htab->elems = vzalloc(htab->elem_size * htab->map.max_entries);
+       htab->elems = bpf_map_area_alloc(htab->elem_size *
+                                        htab->map.max_entries);
        if (!htab->elems)
                return -ENOMEM;
 
@@ -320,14 +320,10 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
                goto free_htab;
 
        err = -ENOMEM;
-       htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct bucket),
-                                     GFP_USER | __GFP_NOWARN);
-
-       if (!htab->buckets) {
-               htab->buckets = vmalloc(htab->n_buckets * sizeof(struct bucket));
-               if (!htab->buckets)
-                       goto free_htab;
-       }
+       htab->buckets = bpf_map_area_alloc(htab->n_buckets *
+                                          sizeof(struct bucket));
+       if (!htab->buckets)
+               goto free_htab;
 
        for (i = 0; i < htab->n_buckets; i++) {
                INIT_HLIST_HEAD(&htab->buckets[i].head);
@@ -354,7 +350,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 free_extra_elems:
        free_percpu(htab->extra_elems);
 free_buckets:
-       kvfree(htab->buckets);
+       bpf_map_area_free(htab->buckets);
 free_htab:
        kfree(htab);
        return ERR_PTR(err);
@@ -1014,7 +1010,7 @@ static void htab_map_free(struct bpf_map *map)
                prealloc_destroy(htab);
 
        free_percpu(htab->extra_elems);
-       kvfree(htab->buckets);
+       bpf_map_area_free(htab->buckets);
        kfree(htab);
 }
 
index 732ae16d12b720e6be3c16b8922b7138e14d08dd..be8519148c255efb92704b5e3b0de102ac4c209c 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/bpf.h>
 #include <linux/jhash.h>
 #include <linux/filter.h>
-#include <linux/vmalloc.h>
 #include <linux/stacktrace.h>
 #include <linux/perf_event.h>
 #include "percpu_freelist.h"
@@ -32,7 +31,7 @@ static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
        u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
        int err;
 
-       smap->elems = vzalloc(elem_size * smap->map.max_entries);
+       smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries);
        if (!smap->elems)
                return -ENOMEM;
 
@@ -45,7 +44,7 @@ static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
        return 0;
 
 free_elems:
-       vfree(smap->elems);
+       bpf_map_area_free(smap->elems);
        return err;
 }
 
@@ -76,12 +75,9 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
        if (cost >= U32_MAX - PAGE_SIZE)
                return ERR_PTR(-E2BIG);
 
-       smap = kzalloc(cost, GFP_USER | __GFP_NOWARN);
-       if (!smap) {
-               smap = vzalloc(cost);
-               if (!smap)
-                       return ERR_PTR(-ENOMEM);
-       }
+       smap = bpf_map_area_alloc(cost);
+       if (!smap)
+               return ERR_PTR(-ENOMEM);
 
        err = -E2BIG;
        cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
@@ -112,7 +108,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 put_buffers:
        put_callchain_buffers();
 free_smap:
-       kvfree(smap);
+       bpf_map_area_free(smap);
        return ERR_PTR(err);
 }
 
@@ -262,9 +258,9 @@ static void stack_map_free(struct bpf_map *map)
        /* wait for bpf programs to complete before freeing stack map */
        synchronize_rcu();
 
-       vfree(smap->elems);
+       bpf_map_area_free(smap->elems);
        pcpu_freelist_destroy(&smap->freelist);
-       kvfree(smap);
+       bpf_map_area_free(smap);
        put_callchain_buffers();
 }
 
index 1d6b29e4e2c35ec14cec0a0d68bb7b636102a395..bbb016adbaeb61c5d46a525f3e7235d11535335e 100644 (file)
@@ -12,6 +12,8 @@
 #include <linux/bpf.h>
 #include <linux/syscalls.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mmzone.h>
 #include <linux/anon_inodes.h>
 #include <linux/file.h>
 #include <linux/license.h>
@@ -49,6 +51,30 @@ void bpf_register_map_type(struct bpf_map_type_list *tl)
        list_add(&tl->list_node, &bpf_map_types);
 }
 
+void *bpf_map_area_alloc(size_t size)
+{
+       /* We definitely need __GFP_NORETRY, so OOM killer doesn't
+        * trigger under memory pressure as we really just want to
+        * fail instead.
+        */
+       const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
+       void *area;
+
+       if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
+               area = kmalloc(size, GFP_USER | flags);
+               if (area != NULL)
+                       return area;
+       }
+
+       return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags,
+                        PAGE_KERNEL);
+}
+
+void bpf_map_area_free(void *area)
+{
+       kvfree(area);
+}
+
 int bpf_map_precharge_memlock(u32 pages)
 {
        struct user_struct *user = get_current_user();
@@ -894,13 +920,14 @@ static int bpf_obj_get(const union bpf_attr *attr)
 
 #ifdef CONFIG_CGROUP_BPF
 
-#define BPF_PROG_ATTACH_LAST_FIELD attach_type
+#define BPF_PROG_ATTACH_LAST_FIELD attach_flags
 
 static int bpf_prog_attach(const union bpf_attr *attr)
 {
+       enum bpf_prog_type ptype;
        struct bpf_prog *prog;
        struct cgroup *cgrp;
-       enum bpf_prog_type ptype;
+       int ret;
 
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;
@@ -908,6 +935,9 @@ static int bpf_prog_attach(const union bpf_attr *attr)
        if (CHECK_ATTR(BPF_PROG_ATTACH))
                return -EINVAL;
 
+       if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
+               return -EINVAL;
+
        switch (attr->attach_type) {
        case BPF_CGROUP_INET_INGRESS:
        case BPF_CGROUP_INET_EGRESS:
@@ -930,10 +960,13 @@ static int bpf_prog_attach(const union bpf_attr *attr)
                return PTR_ERR(cgrp);
        }
 
-       cgroup_bpf_update(cgrp, prog, attr->attach_type);
+       ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
+                               attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
+       if (ret)
+               bpf_prog_put(prog);
        cgroup_put(cgrp);
 
-       return 0;
+       return ret;
 }
 
 #define BPF_PROG_DETACH_LAST_FIELD attach_type
@@ -941,6 +974,7 @@ static int bpf_prog_attach(const union bpf_attr *attr)
 static int bpf_prog_detach(const union bpf_attr *attr)
 {
        struct cgroup *cgrp;
+       int ret;
 
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;
@@ -956,7 +990,7 @@ static int bpf_prog_detach(const union bpf_attr *attr)
                if (IS_ERR(cgrp))
                        return PTR_ERR(cgrp);
 
-               cgroup_bpf_update(cgrp, NULL, attr->attach_type);
+               ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
                cgroup_put(cgrp);
                break;
 
@@ -964,7 +998,7 @@ static int bpf_prog_detach(const union bpf_attr *attr)
                return -EINVAL;
        }
 
-       return 0;
+       return ret;
 }
 #endif /* CONFIG_CGROUP_BPF */
 
index 2ee9ec3051b20774b118a57e4609f30e87bf82be..53bbca7c48598e44fa5e45f88626027749ca8932 100644 (file)
@@ -5221,6 +5221,11 @@ err_free_css:
        return ERR_PTR(err);
 }
 
+/*
+ * The returned cgroup is fully initialized including its control mask, but
+ * it isn't associated with its kernfs_node and doesn't have the control
+ * mask applied.
+ */
 static struct cgroup *cgroup_create(struct cgroup *parent)
 {
        struct cgroup_root *root = parent->root;
@@ -5288,11 +5293,6 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
 
        cgroup_propagate_control(cgrp);
 
-       /* @cgrp doesn't have dir yet so the following will only create csses */
-       ret = cgroup_apply_control_enable(cgrp);
-       if (ret)
-               goto out_destroy;
-
        return cgrp;
 
 out_cancel_ref:
@@ -5300,9 +5300,6 @@ out_cancel_ref:
 out_free_cgrp:
        kfree(cgrp);
        return ERR_PTR(ret);
-out_destroy:
-       cgroup_destroy_locked(cgrp);
-       return ERR_PTR(ret);
 }
 
 static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
@@ -6501,15 +6498,16 @@ static __init int cgroup_namespaces_init(void)
 subsys_initcall(cgroup_namespaces_init);
 
 #ifdef CONFIG_CGROUP_BPF
-void cgroup_bpf_update(struct cgroup *cgrp,
-                      struct bpf_prog *prog,
-                      enum bpf_attach_type type)
+int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
+                     enum bpf_attach_type type, bool overridable)
 {
        struct cgroup *parent = cgroup_parent(cgrp);
+       int ret;
 
        mutex_lock(&cgroup_mutex);
-       __cgroup_bpf_update(cgrp, parent, prog, type);
+       ret = __cgroup_bpf_update(cgrp, parent, prog, type, overridable);
        mutex_unlock(&cgroup_mutex);
+       return ret;
 }
 #endif /* CONFIG_CGROUP_BPF */
 
index 435c14a451181c7a69fb49d513d5d659cd75e814..6605496569914d7a6bdd8ac2e94f331160027cfb 100644 (file)
@@ -82,19 +82,19 @@ void __delayacct_blkio_end(void)
 
 int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
 {
-       cputime_t utime, stime, stimescaled, utimescaled;
+       u64 utime, stime, stimescaled, utimescaled;
        unsigned long long t2, t3;
        unsigned long flags, t1;
        s64 tmp;
 
        task_cputime(tsk, &utime, &stime);
        tmp = (s64)d->cpu_run_real_total;
-       tmp += cputime_to_nsecs(utime + stime);
+       tmp += utime + stime;
        d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;
 
        task_cputime_scaled(tsk, &utimescaled, &stimescaled);
        tmp = (s64)d->cpu_scaled_run_real_total;
-       tmp += cputime_to_nsecs(utimescaled + stimescaled);
+       tmp += utimescaled + stimescaled;
        d->cpu_scaled_run_real_total =
                (tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;
 
index 110b38a58493ee4ba4c19763d2678dae8815e1af..77a932b54a64fbeb2640b35c1cc4c096994bf1d7 100644 (file)
@@ -355,6 +355,8 @@ enum event_type_t {
        EVENT_FLEXIBLE = 0x1,
        EVENT_PINNED = 0x2,
        EVENT_TIME = 0x4,
+       /* see ctx_resched() for details */
+       EVENT_CPU = 0x8,
        EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
 };
 
@@ -678,6 +680,8 @@ perf_cgroup_set_timestamp(struct task_struct *task,
        info->timestamp = ctx->timestamp;
 }
 
+static DEFINE_PER_CPU(struct list_head, cgrp_cpuctx_list);
+
 #define PERF_CGROUP_SWOUT      0x1 /* cgroup switch out every event */
 #define PERF_CGROUP_SWIN       0x2 /* cgroup switch in events based on task */
 
@@ -690,61 +694,46 @@ perf_cgroup_set_timestamp(struct task_struct *task,
 static void perf_cgroup_switch(struct task_struct *task, int mode)
 {
        struct perf_cpu_context *cpuctx;
-       struct pmu *pmu;
+       struct list_head *list;
        unsigned long flags;
 
        /*
-        * disable interrupts to avoid geting nr_cgroup
-        * changes via __perf_event_disable(). Also
-        * avoids preemption.
+        * Disable interrupts and preemption to avoid this CPU's
+        * cgrp_cpuctx_entry to change under us.
         */
        local_irq_save(flags);
 
-       /*
-        * we reschedule only in the presence of cgroup
-        * constrained events.
-        */
-
-       list_for_each_entry_rcu(pmu, &pmus, entry) {
-               cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
-               if (cpuctx->unique_pmu != pmu)
-                       continue; /* ensure we process each cpuctx once */
+       list = this_cpu_ptr(&cgrp_cpuctx_list);
+       list_for_each_entry(cpuctx, list, cgrp_cpuctx_entry) {
+               WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
 
-               /*
-                * perf_cgroup_events says at least one
-                * context on this CPU has cgroup events.
-                *
-                * ctx->nr_cgroups reports the number of cgroup
-                * events for a context.
-                */
-               if (cpuctx->ctx.nr_cgroups > 0) {
-                       perf_ctx_lock(cpuctx, cpuctx->task_ctx);
-                       perf_pmu_disable(cpuctx->ctx.pmu);
+               perf_ctx_lock(cpuctx, cpuctx->task_ctx);
+               perf_pmu_disable(cpuctx->ctx.pmu);
 
-                       if (mode & PERF_CGROUP_SWOUT) {
-                               cpu_ctx_sched_out(cpuctx, EVENT_ALL);
-                               /*
-                                * must not be done before ctxswout due
-                                * to event_filter_match() in event_sched_out()
-                                */
-                               cpuctx->cgrp = NULL;
-                       }
+               if (mode & PERF_CGROUP_SWOUT) {
+                       cpu_ctx_sched_out(cpuctx, EVENT_ALL);
+                       /*
+                        * must not be done before ctxswout due
+                        * to event_filter_match() in event_sched_out()
+                        */
+                       cpuctx->cgrp = NULL;
+               }
 
-                       if (mode & PERF_CGROUP_SWIN) {
-                               WARN_ON_ONCE(cpuctx->cgrp);
-                               /*
-                                * set cgrp before ctxsw in to allow
-                                * event_filter_match() to not have to pass
-                                * task around
-                                * we pass the cpuctx->ctx to perf_cgroup_from_task()
-                                * because cgorup events are only per-cpu
-                                */
-                               cpuctx->cgrp = perf_cgroup_from_task(task, &cpuctx->ctx);
-                               cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
-                       }
-                       perf_pmu_enable(cpuctx->ctx.pmu);
-                       perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
+               if (mode & PERF_CGROUP_SWIN) {
+                       WARN_ON_ONCE(cpuctx->cgrp);
+                       /*
+                        * set cgrp before ctxsw in to allow
+                        * event_filter_match() to not have to pass
+                        * task around
+                        * we pass the cpuctx->ctx to perf_cgroup_from_task()
+                        * because cgorup events are only per-cpu
+                        */
+                       cpuctx->cgrp = perf_cgroup_from_task(task,
+                                                            &cpuctx->ctx);
+                       cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
                }
+               perf_pmu_enable(cpuctx->ctx.pmu);
+               perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
        }
 
        local_irq_restore(flags);
@@ -889,6 +878,7 @@ list_update_cgroup_event(struct perf_event *event,
                         struct perf_event_context *ctx, bool add)
 {
        struct perf_cpu_context *cpuctx;
+       struct list_head *cpuctx_entry;
 
        if (!is_cgroup_event(event))
                return;
@@ -902,15 +892,16 @@ list_update_cgroup_event(struct perf_event *event,
         * this will always be called from the right CPU.
         */
        cpuctx = __get_cpu_context(ctx);
-
-       /*
-        * cpuctx->cgrp is NULL until a cgroup event is sched in or
-        * ctx->nr_cgroup == 0 .
-        */
-       if (add && perf_cgroup_from_task(current, ctx) == event->cgrp)
-               cpuctx->cgrp = event->cgrp;
-       else if (!add)
+       cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
+       /* cpuctx->cgrp is NULL unless a cgroup event is active in this CPU .*/
+       if (add) {
+               list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list));
+               if (perf_cgroup_from_task(current, ctx) == event->cgrp)
+                       cpuctx->cgrp = event->cgrp;
+       } else {
+               list_del(cpuctx_entry);
                cpuctx->cgrp = NULL;
+       }
 }
 
 #else /* !CONFIG_CGROUP_PERF */
@@ -1453,6 +1444,20 @@ static void update_group_times(struct perf_event *leader)
                update_event_times(event);
 }
 
+static enum event_type_t get_event_type(struct perf_event *event)
+{
+       struct perf_event_context *ctx = event->ctx;
+       enum event_type_t event_type;
+
+       lockdep_assert_held(&ctx->lock);
+
+       event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE;
+       if (!ctx->task)
+               event_type |= EVENT_CPU;
+
+       return event_type;
+}
+
 static struct list_head *
 ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
 {
@@ -1469,7 +1474,6 @@ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
 static void
 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 {
-
        lockdep_assert_held(&ctx->lock);
 
        WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
@@ -1624,6 +1628,8 @@ static void perf_group_attach(struct perf_event *event)
 {
        struct perf_event *group_leader = event->group_leader, *pos;
 
+       lockdep_assert_held(&event->ctx->lock);
+
        /*
         * We can have double attach due to group movement in perf_event_open.
         */
@@ -1697,6 +1703,8 @@ static void perf_group_detach(struct perf_event *event)
        struct perf_event *sibling, *tmp;
        struct list_head *list = NULL;
 
+       lockdep_assert_held(&event->ctx->lock);
+
        /*
         * We can have double detach due to exit/hot-unplug + close.
         */
@@ -1895,9 +1903,29 @@ __perf_remove_from_context(struct perf_event *event,
  */
 static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
 {
-       lockdep_assert_held(&event->ctx->mutex);
+       struct perf_event_context *ctx = event->ctx;
+
+       lockdep_assert_held(&ctx->mutex);
 
        event_function_call(event, __perf_remove_from_context, (void *)flags);
+
+       /*
+        * The above event_function_call() can NO-OP when it hits
+        * TASK_TOMBSTONE. In that case we must already have been detached
+        * from the context (by perf_event_exit_event()) but the grouping
+        * might still be in-tact.
+        */
+       WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
+       if ((flags & DETACH_GROUP) &&
+           (event->attach_state & PERF_ATTACH_GROUP)) {
+               /*
+                * Since in that case we cannot possibly be scheduled, simply
+                * detach now.
+                */
+               raw_spin_lock_irq(&ctx->lock);
+               perf_group_detach(event);
+               raw_spin_unlock_irq(&ctx->lock);
+       }
 }
 
 /*
@@ -2203,7 +2231,8 @@ ctx_sched_in(struct perf_event_context *ctx,
             struct task_struct *task);
 
 static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
-                              struct perf_event_context *ctx)
+                              struct perf_event_context *ctx,
+                              enum event_type_t event_type)
 {
        if (!cpuctx->task_ctx)
                return;
@@ -2211,7 +2240,7 @@ static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
        if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
                return;
 
-       ctx_sched_out(ctx, cpuctx, EVENT_ALL);
+       ctx_sched_out(ctx, cpuctx, event_type);
 }
 
 static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
@@ -2226,13 +2255,51 @@ static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
                ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
 }
 
+/*
+ * We want to maintain the following priority of scheduling:
+ *  - CPU pinned (EVENT_CPU | EVENT_PINNED)
+ *  - task pinned (EVENT_PINNED)
+ *  - CPU flexible (EVENT_CPU | EVENT_FLEXIBLE)
+ *  - task flexible (EVENT_FLEXIBLE).
+ *
+ * In order to avoid unscheduling and scheduling back in everything every
+ * time an event is added, only do it for the groups of equal priority and
+ * below.
+ *
+ * This can be called after a batch operation on task events, in which case
+ * event_type is a bit mask of the types of events involved. For CPU events,
+ * event_type is only either EVENT_PINNED or EVENT_FLEXIBLE.
+ */
 static void ctx_resched(struct perf_cpu_context *cpuctx,
-                       struct perf_event_context *task_ctx)
+                       struct perf_event_context *task_ctx,
+                       enum event_type_t event_type)
 {
+       enum event_type_t ctx_event_type = event_type & EVENT_ALL;
+       bool cpu_event = !!(event_type & EVENT_CPU);
+
+       /*
+        * If pinned groups are involved, flexible groups also need to be
+        * scheduled out.
+        */
+       if (event_type & EVENT_PINNED)
+               event_type |= EVENT_FLEXIBLE;
+
        perf_pmu_disable(cpuctx->ctx.pmu);
        if (task_ctx)
-               task_ctx_sched_out(cpuctx, task_ctx);
-       cpu_ctx_sched_out(cpuctx, EVENT_ALL);
+               task_ctx_sched_out(cpuctx, task_ctx, event_type);
+
+       /*
+        * Decide which cpu ctx groups to schedule out based on the types
+        * of events that caused rescheduling:
+        *  - EVENT_CPU: schedule out corresponding groups;
+        *  - EVENT_PINNED task events: schedule out EVENT_FLEXIBLE groups;
+        *  - otherwise, do nothing more.
+        */
+       if (cpu_event)
+               cpu_ctx_sched_out(cpuctx, ctx_event_type);
+       else if (ctx_event_type & EVENT_PINNED)
+               cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
+
        perf_event_sched_in(cpuctx, task_ctx, current);
        perf_pmu_enable(cpuctx->ctx.pmu);
 }
@@ -2279,7 +2346,7 @@ static int  __perf_install_in_context(void *info)
        if (reprogram) {
                ctx_sched_out(ctx, cpuctx, EVENT_TIME);
                add_event_to_ctx(event, ctx);
-               ctx_resched(cpuctx, task_ctx);
+               ctx_resched(cpuctx, task_ctx, get_event_type(event));
        } else {
                add_event_to_ctx(event, ctx);
        }
@@ -2446,7 +2513,7 @@ static void __perf_event_enable(struct perf_event *event,
        if (ctx->task)
                WARN_ON_ONCE(task_ctx != ctx);
 
-       ctx_resched(cpuctx, task_ctx);
+       ctx_resched(cpuctx, task_ctx, get_event_type(event));
 }
 
 /*
@@ -2873,7 +2940,7 @@ unlock:
 
        if (do_switch) {
                raw_spin_lock(&ctx->lock);
-               task_ctx_sched_out(cpuctx, ctx);
+               task_ctx_sched_out(cpuctx, ctx, EVENT_ALL);
                raw_spin_unlock(&ctx->lock);
        }
 }
@@ -2920,7 +2987,7 @@ static void perf_pmu_sched_task(struct task_struct *prev,
                return;
 
        list_for_each_entry(cpuctx, this_cpu_ptr(&sched_cb_list), sched_cb_entry) {
-               pmu = cpuctx->unique_pmu; /* software PMUs will not have sched_task */
+               pmu = cpuctx->ctx.pmu; /* software PMUs will not have sched_task */
 
                if (WARN_ON_ONCE(!pmu->sched_task))
                        continue;
@@ -3110,8 +3177,12 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
         * We want to keep the following priority order:
         * cpu pinned (that don't need to move), task pinned,
         * cpu flexible, task flexible.
+        *
+        * However, if task's ctx is not carrying any pinned
+        * events, no need to flip the cpuctx's events around.
         */
-       cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
+       if (!list_empty(&ctx->pinned_groups))
+               cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
        perf_event_sched_in(cpuctx, ctx, task);
        perf_pmu_enable(ctx->pmu);
        perf_ctx_unlock(cpuctx, ctx);
@@ -3426,6 +3497,7 @@ static int event_enable_on_exec(struct perf_event *event,
 static void perf_event_enable_on_exec(int ctxn)
 {
        struct perf_event_context *ctx, *clone_ctx = NULL;
+       enum event_type_t event_type = 0;
        struct perf_cpu_context *cpuctx;
        struct perf_event *event;
        unsigned long flags;
@@ -3439,15 +3511,17 @@ static void perf_event_enable_on_exec(int ctxn)
        cpuctx = __get_cpu_context(ctx);
        perf_ctx_lock(cpuctx, ctx);
        ctx_sched_out(ctx, cpuctx, EVENT_TIME);
-       list_for_each_entry(event, &ctx->event_list, event_entry)
+       list_for_each_entry(event, &ctx->event_list, event_entry) {
                enabled |= event_enable_on_exec(event, ctx);
+               event_type |= get_event_type(event);
+       }
 
        /*
         * Unclone and reschedule this context if we enabled any event.
         */
        if (enabled) {
                clone_ctx = unclone_ctx(ctx);
-               ctx_resched(cpuctx, ctx);
+               ctx_resched(cpuctx, ctx, event_type);
        }
        perf_ctx_unlock(cpuctx, ctx);
 
@@ -3464,14 +3538,15 @@ struct perf_read_data {
        int ret;
 };
 
-static int find_cpu_to_read(struct perf_event *event, int local_cpu)
+static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
 {
-       int event_cpu = event->oncpu;
        u16 local_pkg, event_pkg;
 
        if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
-               event_pkg =  topology_physical_package_id(event_cpu);
-               local_pkg =  topology_physical_package_id(local_cpu);
+               int local_cpu = smp_processor_id();
+
+               event_pkg = topology_physical_package_id(event_cpu);
+               local_pkg = topology_physical_package_id(local_cpu);
 
                if (event_pkg == local_pkg)
                        return local_cpu;
@@ -3601,7 +3676,7 @@ u64 perf_event_read_local(struct perf_event *event)
 
 static int perf_event_read(struct perf_event *event, bool group)
 {
-       int ret = 0, cpu_to_read, local_cpu;
+       int event_cpu, ret = 0;
 
        /*
         * If event is enabled and currently active on a CPU, update the
@@ -3614,21 +3689,25 @@ static int perf_event_read(struct perf_event *event, bool group)
                        .ret = 0,
                };
 
-               local_cpu = get_cpu();
-               cpu_to_read = find_cpu_to_read(event, local_cpu);
-               put_cpu();
+               event_cpu = READ_ONCE(event->oncpu);
+               if ((unsigned)event_cpu >= nr_cpu_ids)
+                       return 0;
+
+               preempt_disable();
+               event_cpu = __perf_event_read_cpu(event, event_cpu);
 
                /*
                 * Purposely ignore the smp_call_function_single() return
                 * value.
                 *
-                * If event->oncpu isn't a valid CPU it means the event got
+                * If event_cpu isn't a valid CPU it means the event got
                 * scheduled out and that will have updated the event count.
                 *
                 * Therefore, either way, we'll have an up-to-date event count
                 * after this.
                 */
-               (void)smp_call_function_single(cpu_to_read, __perf_event_read, &data, 1);
+               (void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
+               preempt_enable();
                ret = data.ret;
        } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
                struct perf_event_context *ctx = event->ctx;
@@ -6609,6 +6688,27 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
        char *buf = NULL;
        char *name;
 
+       if (vma->vm_flags & VM_READ)
+               prot |= PROT_READ;
+       if (vma->vm_flags & VM_WRITE)
+               prot |= PROT_WRITE;
+       if (vma->vm_flags & VM_EXEC)
+               prot |= PROT_EXEC;
+
+       if (vma->vm_flags & VM_MAYSHARE)
+               flags = MAP_SHARED;
+       else
+               flags = MAP_PRIVATE;
+
+       if (vma->vm_flags & VM_DENYWRITE)
+               flags |= MAP_DENYWRITE;
+       if (vma->vm_flags & VM_MAYEXEC)
+               flags |= MAP_EXECUTABLE;
+       if (vma->vm_flags & VM_LOCKED)
+               flags |= MAP_LOCKED;
+       if (vma->vm_flags & VM_HUGETLB)
+               flags |= MAP_HUGETLB;
+
        if (file) {
                struct inode *inode;
                dev_t dev;
@@ -6635,27 +6735,6 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
                maj = MAJOR(dev);
                min = MINOR(dev);
 
-               if (vma->vm_flags & VM_READ)
-                       prot |= PROT_READ;
-               if (vma->vm_flags & VM_WRITE)
-                       prot |= PROT_WRITE;
-               if (vma->vm_flags & VM_EXEC)
-                       prot |= PROT_EXEC;
-
-               if (vma->vm_flags & VM_MAYSHARE)
-                       flags = MAP_SHARED;
-               else
-                       flags = MAP_PRIVATE;
-
-               if (vma->vm_flags & VM_DENYWRITE)
-                       flags |= MAP_DENYWRITE;
-               if (vma->vm_flags & VM_MAYEXEC)
-                       flags |= MAP_EXECUTABLE;
-               if (vma->vm_flags & VM_LOCKED)
-                       flags |= MAP_LOCKED;
-               if (vma->vm_flags & VM_HUGETLB)
-                       flags |= MAP_HUGETLB;
-
                goto got_name;
        } else {
                if (vma->vm_ops && vma->vm_ops->name) {
@@ -8016,6 +8095,9 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
        if (task == TASK_TOMBSTONE)
                return;
 
+       if (!ifh->nr_file_filters)
+               return;
+
        mm = get_task_mm(event->ctx->task);
        if (!mm)
                goto restart;
@@ -8186,6 +8268,7 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
                 * attribute.
                 */
                if (state == IF_STATE_END) {
+                       ret = -EINVAL;
                        if (kernel && event->attr.exclude_kernel)
                                goto fail;
 
@@ -8193,6 +8276,18 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
                                if (!filename)
                                        goto fail;
 
+                               /*
+                                * For now, we only support file-based filters
+                                * in per-task events; doing so for CPU-wide
+                                * events requires additional context switching
+                                * trickery, since same object code will be
+                                * mapped at different virtual addresses in
+                                * different processes.
+                                */
+                               ret = -EOPNOTSUPP;
+                               if (!event->ctx->task)
+                                       goto fail_free_name;
+
                                /* look up the path and grab its inode */
                                ret = kern_path(filename, LOOKUP_FOLLOW, &path);
                                if (ret)
@@ -8208,6 +8303,8 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
                                    !S_ISREG(filter->inode->i_mode))
                                        /* free_filters_list() will iput() */
                                        goto fail;
+
+                               event->addr_filters.nr_file_filters++;
                        }
 
                        /* ready to consume more filters */
@@ -8247,24 +8344,13 @@ perf_event_set_addr_filter(struct perf_event *event, char *filter_str)
        if (WARN_ON_ONCE(event->parent))
                return -EINVAL;
 
-       /*
-        * For now, we only support filtering in per-task events; doing so
-        * for CPU-wide events requires additional context switching trickery,
-        * since same object code will be mapped at different virtual
-        * addresses in different processes.
-        */
-       if (!event->ctx->task)
-               return -EOPNOTSUPP;
-
        ret = perf_event_parse_addr_filter(event, filter_str, &filters);
        if (ret)
-               return ret;
+               goto fail_clear_files;
 
        ret = event->pmu->addr_filters_validate(&filters);
-       if (ret) {
-               free_filters_list(&filters);
-               return ret;
-       }
+       if (ret)
+               goto fail_free_filters;
 
        /* remove existing filters, if any */
        perf_addr_filters_splice(event, &filters);
@@ -8272,6 +8358,14 @@ perf_event_set_addr_filter(struct perf_event *event, char *filter_str)
        /* install new filters */
        perf_event_for_each_child(event, perf_event_addr_filters_apply);
 
+       return ret;
+
+fail_free_filters:
+       free_filters_list(&filters);
+
+fail_clear_files:
+       event->addr_filters.nr_file_filters = 0;
+
        return ret;
 }
 
@@ -8624,37 +8718,10 @@ static struct perf_cpu_context __percpu *find_pmu_context(int ctxn)
        return NULL;
 }
 
-static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
-{
-       int cpu;
-
-       for_each_possible_cpu(cpu) {
-               struct perf_cpu_context *cpuctx;
-
-               cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
-
-               if (cpuctx->unique_pmu == old_pmu)
-                       cpuctx->unique_pmu = pmu;
-       }
-}
-
 static void free_pmu_context(struct pmu *pmu)
 {
-       struct pmu *i;
-
        mutex_lock(&pmus_lock);
-       /*
-        * Like a real lame refcount.
-        */
-       list_for_each_entry(i, &pmus, entry) {
-               if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
-                       update_pmu_context(i, pmu);
-                       goto out;
-               }
-       }
-
        free_percpu(pmu->pmu_cpu_context);
-out:
        mutex_unlock(&pmus_lock);
 }
 
@@ -8858,8 +8925,6 @@ skip_type:
                cpuctx->ctx.pmu = pmu;
 
                __perf_mux_hrtimer_init(cpuctx, cpu);
-
-               cpuctx->unique_pmu = pmu;
        }
 
 got_cpu_context:
@@ -8977,6 +9042,14 @@ static struct pmu *perf_init_event(struct perf_event *event)
 
        idx = srcu_read_lock(&pmus_srcu);
 
+       /* Try parent's PMU first: */
+       if (event->parent && event->parent->pmu) {
+               pmu = event->parent->pmu;
+               ret = perf_try_init_event(pmu, event);
+               if (!ret)
+                       goto unlock;
+       }
+
        rcu_read_lock();
        pmu = idr_find(&pmu_idr, event->attr.type);
        rcu_read_unlock();
@@ -10237,7 +10310,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
         * in.
         */
        raw_spin_lock_irq(&child_ctx->lock);
-       task_ctx_sched_out(__get_cpu_context(child_ctx), child_ctx);
+       task_ctx_sched_out(__get_cpu_context(child_ctx), child_ctx, EVENT_ALL);
 
        /*
         * Now that the context is inactive, destroy the task <-> ctx relation
@@ -10686,6 +10759,9 @@ static void __init perf_event_init_all_cpus(void)
                INIT_LIST_HEAD(&per_cpu(pmu_sb_events.list, cpu));
                raw_spin_lock_init(&per_cpu(pmu_sb_events.lock, cpu));
 
+#ifdef CONFIG_CGROUP_PERF
+               INIT_LIST_HEAD(&per_cpu(cgrp_cpuctx_list, cpu));
+#endif
                INIT_LIST_HEAD(&per_cpu(sched_cb_list, cpu));
        }
 }
index 8f14b866f9f616adcf13f2fde5c5652fb113cb42..b67c57faa705d991f87b13e6b17bde64afea9131 100644 (file)
@@ -55,6 +55,7 @@
 #include <linux/shm.h>
 #include <linux/kcov.h>
 #include <linux/random.h>
+#include <linux/rcuwait.h>
 
 #include <linux/uaccess.h>
 #include <asm/unistd.h>
@@ -86,7 +87,7 @@ static void __exit_signal(struct task_struct *tsk)
        bool group_dead = thread_group_leader(tsk);
        struct sighand_struct *sighand;
        struct tty_struct *uninitialized_var(tty);
-       cputime_t utime, stime;
+       u64 utime, stime;
 
        sighand = rcu_dereference_check(tsk->sighand,
                                        lockdep_tasklist_lock_is_held());
@@ -282,6 +283,35 @@ retry:
        return task;
 }
 
+void rcuwait_wake_up(struct rcuwait *w)
+{
+       struct task_struct *task;
+
+       rcu_read_lock();
+
+       /*
+        * Order condition vs @task, such that everything prior to the load
+        * of @task is visible. This is the condition as to why the user called
+        * rcuwait_trywake() in the first place. Pairs with set_current_state()
+        * barrier (A) in rcuwait_wait_event().
+        *
+        *    WAIT                WAKE
+        *    [S] tsk = current   [S] cond = true
+        *        MB (A)              MB (B)
+        *    [L] cond            [L] tsk
+        */
+       smp_rmb(); /* (B) */
+
+       /*
+        * Avoid using task_rcu_dereference() magic as long as we are careful,
+        * see comment in rcuwait_wait_event() regarding ->exit_state.
+        */
+       task = rcu_dereference(w->task);
+       if (task)
+               wake_up_process(task);
+       rcu_read_unlock();
+}
+
 struct task_struct *try_get_task_struct(struct task_struct **ptask)
 {
        struct task_struct *task;
@@ -468,12 +498,12 @@ assign_new_owner:
  * Turn us into a lazy TLB process if we
  * aren't already..
  */
-static void exit_mm(struct task_struct *tsk)
+static void exit_mm(void)
 {
-       struct mm_struct *mm = tsk->mm;
+       struct mm_struct *mm = current->mm;
        struct core_state *core_state;
 
-       mm_release(tsk, mm);
+       mm_release(current, mm);
        if (!mm)
                return;
        sync_mm_rss(mm);
@@ -491,7 +521,7 @@ static void exit_mm(struct task_struct *tsk)
 
                up_read(&mm->mmap_sem);
 
-               self.task = tsk;
+               self.task = current;
                self.next = xchg(&core_state->dumper.next, &self);
                /*
                 * Implies mb(), the result of xchg() must be visible
@@ -501,22 +531,22 @@ static void exit_mm(struct task_struct *tsk)
                        complete(&core_state->startup);
 
                for (;;) {
-                       set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+                       set_current_state(TASK_UNINTERRUPTIBLE);
                        if (!self.task) /* see coredump_finish() */
                                break;
                        freezable_schedule();
                }
-               __set_task_state(tsk, TASK_RUNNING);
+               __set_current_state(TASK_RUNNING);
                down_read(&mm->mmap_sem);
        }
        atomic_inc(&mm->mm_count);
-       BUG_ON(mm != tsk->active_mm);
+       BUG_ON(mm != current->active_mm);
        /* more a memory barrier than a real lock */
-       task_lock(tsk);
-       tsk->mm = NULL;
+       task_lock(current);
+       current->mm = NULL;
        up_read(&mm->mmap_sem);
        enter_lazy_tlb(mm, current);
-       task_unlock(tsk);
+       task_unlock(current);
        mm_update_next_owner(mm);
        mmput(mm);
        if (test_thread_flag(TIF_MEMDIE))
@@ -823,7 +853,7 @@ void __noreturn do_exit(long code)
        tsk->exit_code = code;
        taskstats_exit(tsk, group_dead);
 
-       exit_mm(tsk);
+       exit_mm();
 
        if (group_dead)
                acct_process();
@@ -1091,7 +1121,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
                struct signal_struct *sig = p->signal;
                struct signal_struct *psig = current->signal;
                unsigned long maxrss;
-               cputime_t tgutime, tgstime;
+               u64 tgutime, tgstime;
 
                /*
                 * The resource counters for the group leader are in its
index e3beec4a2339ee1c013698beb4c9ca126d1e2121..e1359474baa5a55288a37386f290cb68b61ddd85 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/init.h>
+#include <linux/kprobes.h>
 
 #include <asm/sections.h>
 #include <linux/uaccess.h>
@@ -104,6 +105,8 @@ int __kernel_text_address(unsigned long addr)
                return 1;
        if (is_ftrace_trampoline(addr))
                return 1;
+       if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr))
+               return 1;
        /*
         * There might be init symbols in saved stacktraces.
         * Give those symbols a chance to be printed in
@@ -123,7 +126,11 @@ int kernel_text_address(unsigned long addr)
                return 1;
        if (is_module_text_address(addr))
                return 1;
-       return is_ftrace_trampoline(addr);
+       if (is_ftrace_trampoline(addr))
+               return 1;
+       if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr))
+               return 1;
+       return 0;
 }
 
 /*
index 11c5c8ab827c4be8ef8cb09072de2364d90aff6c..ff82e24573b6d07e7ccaf37d1d765311d48a1398 100644 (file)
@@ -432,11 +432,13 @@ void __init fork_init(void)
        int i;
 #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
 #ifndef ARCH_MIN_TASKALIGN
-#define ARCH_MIN_TASKALIGN     L1_CACHE_BYTES
+#define ARCH_MIN_TASKALIGN     0
 #endif
+       int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN);
+
        /* create a slab on which task_structs can be allocated */
        task_struct_cachep = kmem_cache_create("task_struct",
-                       arch_task_struct_size, ARCH_MIN_TASKALIGN,
+                       arch_task_struct_size, align,
                        SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT, NULL);
 #endif
 
@@ -1304,6 +1306,7 @@ void __cleanup_sighand(struct sighand_struct *sighand)
        }
 }
 
+#ifdef CONFIG_POSIX_TIMERS
 /*
  * Initialize POSIX timer handling for a thread group.
  */
@@ -1313,7 +1316,7 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
 
        cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
        if (cpu_limit != RLIM_INFINITY) {
-               sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
+               sig->cputime_expires.prof_exp = cpu_limit * NSEC_PER_SEC;
                sig->cputimer.running = true;
        }
 
@@ -1322,6 +1325,9 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
        INIT_LIST_HEAD(&sig->cpu_timers[1]);
        INIT_LIST_HEAD(&sig->cpu_timers[2]);
 }
+#else
+static inline void posix_cpu_timers_init_group(struct signal_struct *sig) { }
+#endif
 
 static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 {
@@ -1346,11 +1352,11 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
        init_waitqueue_head(&sig->wait_chldexit);
        sig->curr_target = tsk;
        init_sigpending(&sig->shared_pending);
-       INIT_LIST_HEAD(&sig->posix_timers);
        seqlock_init(&sig->stats_lock);
        prev_cputime_init(&sig->prev_cputime);
 
 #ifdef CONFIG_POSIX_TIMERS
+       INIT_LIST_HEAD(&sig->posix_timers);
        hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        sig->real_timer.function = it_real_fn;
 #endif
@@ -1425,6 +1431,7 @@ static void rt_mutex_init_task(struct task_struct *p)
 #endif
 }
 
+#ifdef CONFIG_POSIX_TIMERS
 /*
  * Initialize POSIX timer handling for a single task.
  */
@@ -1437,6 +1444,9 @@ static void posix_cpu_timers_init(struct task_struct *tsk)
        INIT_LIST_HEAD(&tsk->cpu_timers[1]);
        INIT_LIST_HEAD(&tsk->cpu_timers[2]);
 }
+#else
+static inline void posix_cpu_timers_init(struct task_struct *tsk) { }
+#endif
 
 static inline void
 init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
index 0842c8ca534b6c7760b254163d88f58cb1229b12..cdf3650361414e39e97018012871579c1eda76f6 100644 (file)
@@ -3323,4 +3323,4 @@ static int __init futex_init(void)
 
        return 0;
 }
-__initcall(futex_init);
+core_initcall(futex_init);
index 74d90a75426881881cc7e42ff3fcdba91efd76c9..1613bfd483657ceea438362c1f0c6eb69823785e 100644 (file)
@@ -2,6 +2,7 @@
 #include <linux/interrupt.h>
 #include <linux/device.h>
 #include <linux/gfp.h>
+#include <linux/irq.h>
 
 /*
  * Device resource management aware IRQ request/free implementation.
@@ -33,7 +34,7 @@ static int devm_irq_match(struct device *dev, void *res, void *data)
  *     @thread_fn: function to be called in a threaded interrupt context. NULL
  *                 for devices which handle everything in @handler
  *     @irqflags: Interrupt type flags
- *     @devname: An ascii name for the claiming device
+ *     @devname: An ascii name for the claiming device, dev_name(dev) if NULL
  *     @dev_id: A cookie passed back to the handler function
  *
  *     Except for the extra @dev argument, this function takes the
@@ -57,6 +58,9 @@ int devm_request_threaded_irq(struct device *dev, unsigned int irq,
        if (!dr)
                return -ENOMEM;
 
+       if (!devname)
+               devname = dev_name(dev);
+
        rc = request_threaded_irq(irq, handler, thread_fn, irqflags, devname,
                                  dev_id);
        if (rc) {
@@ -80,7 +84,7 @@ EXPORT_SYMBOL(devm_request_threaded_irq);
  *     @thread_fn: function to be called in a threaded interrupt context. NULL
  *                 for devices which handle everything in @handler
  *     @irqflags: Interrupt type flags
- *     @devname: An ascii name for the claiming device
+ *     @devname: An ascii name for the claiming device, dev_name(dev) if NULL
  *     @dev_id: A cookie passed back to the handler function
  *
  *     Except for the extra @dev argument, this function takes the
@@ -103,6 +107,9 @@ int devm_request_any_context_irq(struct device *dev, unsigned int irq,
        if (!dr)
                return -ENOMEM;
 
+       if (!devname)
+               devname = dev_name(dev);
+
        rc = request_any_context_irq(irq, handler, irqflags, devname, dev_id);
        if (rc < 0) {
                devres_free(dr);
@@ -137,3 +144,57 @@ void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id)
        free_irq(irq, dev_id);
 }
 EXPORT_SYMBOL(devm_free_irq);
+
+struct irq_desc_devres {
+       unsigned int from;
+       unsigned int cnt;
+};
+
+static void devm_irq_desc_release(struct device *dev, void *res)
+{
+       struct irq_desc_devres *this = res;
+
+       irq_free_descs(this->from, this->cnt);
+}
+
+/**
+ * __devm_irq_alloc_descs - Allocate and initialize a range of irq descriptors
+ *                         for a managed device
+ * @dev:       Device to allocate the descriptors for
+ * @irq:       Allocate for specific irq number if irq >= 0
+ * @from:      Start the search from this irq number
+ * @cnt:       Number of consecutive irqs to allocate
+ * @node:      Preferred node on which the irq descriptor should be allocated
+ * @owner:     Owning module (can be NULL)
+ * @affinity:  Optional pointer to an affinity mask array of size @cnt
+ *             which hints where the irq descriptors should be allocated
+ *             and which default affinities to use
+ *
+ * Returns the first irq number or error code.
+ *
+ * Note: Use the provided wrappers (devm_irq_alloc_desc*) for simplicity.
+ */
+int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
+                          unsigned int cnt, int node, struct module *owner,
+                          const struct cpumask *affinity)
+{
+       struct irq_desc_devres *dr;
+       int base;
+
+       dr = devres_alloc(devm_irq_desc_release, sizeof(*dr), GFP_KERNEL);
+       if (!dr)
+               return -ENOMEM;
+
+       base = __irq_alloc_descs(irq, from, cnt, node, owner, affinity);
+       if (base < 0) {
+               devres_free(dr);
+               return base;
+       }
+
+       dr->from = base;
+       dr->cnt = cnt;
+       devres_add(dev, dr);
+
+       return base;
+}
+EXPORT_SYMBOL_GPL(__devm_irq_alloc_descs);
index 8c0a0ae43521c7f8b9e97964912cc3ab5e10fd15..31805f237396bdfb6f4d72a906c0dcb957b6ceaf 100644 (file)
@@ -277,6 +277,31 @@ struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
 }
 EXPORT_SYMBOL_GPL(irq_find_matching_fwspec);
 
+/**
+ * irq_domain_check_msi_remap - Check whether all MSI irq domains implement
+ * IRQ remapping
+ *
+ * Return: false if any MSI irq domain does not support IRQ remapping,
+ * true otherwise (including if there is no MSI irq domain)
+ */
+bool irq_domain_check_msi_remap(void)
+{
+       struct irq_domain *h;
+       bool ret = true;
+
+       mutex_lock(&irq_domain_mutex);
+       list_for_each_entry(h, &irq_domain_list, link) {
+               if (irq_domain_is_msi(h) &&
+                   !irq_domain_hierarchical_is_msi_remap(h)) {
+                       ret = false;
+                       break;
+               }
+       }
+       mutex_unlock(&irq_domain_mutex);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(irq_domain_check_msi_remap);
+
 /**
  * irq_set_default_host() - Set a "default" irq domain
  * @domain: default domain pointer
@@ -1346,6 +1371,30 @@ void irq_domain_free_irqs_parent(struct irq_domain *domain,
 }
 EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
 
+static void __irq_domain_activate_irq(struct irq_data *irq_data)
+{
+       if (irq_data && irq_data->domain) {
+               struct irq_domain *domain = irq_data->domain;
+
+               if (irq_data->parent_data)
+                       __irq_domain_activate_irq(irq_data->parent_data);
+               if (domain->ops->activate)
+                       domain->ops->activate(domain, irq_data);
+       }
+}
+
+static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
+{
+       if (irq_data && irq_data->domain) {
+               struct irq_domain *domain = irq_data->domain;
+
+               if (domain->ops->deactivate)
+                       domain->ops->deactivate(domain, irq_data);
+               if (irq_data->parent_data)
+                       __irq_domain_deactivate_irq(irq_data->parent_data);
+       }
+}
+
 /**
  * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
  *                          interrupt
@@ -1356,13 +1405,9 @@ EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
  */
 void irq_domain_activate_irq(struct irq_data *irq_data)
 {
-       if (irq_data && irq_data->domain) {
-               struct irq_domain *domain = irq_data->domain;
-
-               if (irq_data->parent_data)
-                       irq_domain_activate_irq(irq_data->parent_data);
-               if (domain->ops->activate)
-                       domain->ops->activate(domain, irq_data);
+       if (!irqd_is_activated(irq_data)) {
+               __irq_domain_activate_irq(irq_data);
+               irqd_set_activated(irq_data);
        }
 }
 
@@ -1376,13 +1421,9 @@ void irq_domain_activate_irq(struct irq_data *irq_data)
  */
 void irq_domain_deactivate_irq(struct irq_data *irq_data)
 {
-       if (irq_data && irq_data->domain) {
-               struct irq_domain *domain = irq_data->domain;
-
-               if (domain->ops->deactivate)
-                       domain->ops->deactivate(domain, irq_data);
-               if (irq_data->parent_data)
-                       irq_domain_deactivate_irq(irq_data->parent_data);
+       if (irqd_is_activated(irq_data)) {
+               __irq_domain_deactivate_irq(irq_data);
+               irqd_clr_activated(irq_data);
        }
 }
 
@@ -1392,6 +1433,20 @@ static void irq_domain_check_hierarchy(struct irq_domain *domain)
        if (domain->ops->alloc)
                domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY;
 }
+
+/**
+ * irq_domain_hierarchical_is_msi_remap - Check if the domain or any
+ * parent has MSI remapping support
+ * @domain: domain pointer
+ */
+bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
+{
+       for (; domain; domain = domain->parent) {
+               if (irq_domain_is_msi_remap(domain))
+                       return true;
+       }
+       return false;
+}
 #else  /* CONFIG_IRQ_DOMAIN_HIERARCHY */
 /**
  * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
index ee230063f033ca8da380516e2d722068b094c07f..ddc2f5427f75c57e09bc2e5ba9d655fa35c6e288 100644 (file)
@@ -270,8 +270,8 @@ struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
        if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
                msi_domain_update_chip_ops(info);
 
-       return irq_domain_create_hierarchy(parent, 0, 0, fwnode,
-                                          &msi_domain_ops, info);
+       return irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0,
+                                          fwnode, &msi_domain_ops, info);
 }
 
 int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
index feaa813b84a987a20f195a7eb197482abb3e2015..c53edad7b459d99e8dd98dccab743bf86c2023a2 100644 (file)
@@ -487,6 +487,8 @@ int show_interrupts(struct seq_file *p, void *v)
        }
        if (desc->irq_data.domain)
                seq_printf(p, " %*d", prec, (int) desc->irq_data.hwirq);
+       else
+               seq_printf(p, " %*s", prec, "");
 #ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
        seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
 #endif
index 5707f97a3e6ac50954f8d407d5a4aadcaa2b96fc..061ba7eed4edf77249b0a99df771e3f6d0dc26e0 100644 (file)
@@ -175,7 +175,9 @@ out:
 
 static inline int bad_action_ret(irqreturn_t action_ret)
 {
-       if (likely(action_ret <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
+       unsigned int r = action_ret;
+
+       if (likely(r <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
                return 0;
        return 1;
 }
index 43460104f119d070841ba1af1a51536dc8017f61..ebb4dadca66b1a66b3fb23260a2e692b6c22e396 100644 (file)
@@ -149,9 +149,11 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
        struct kprobe_insn_page *kip;
        kprobe_opcode_t *slot = NULL;
 
+       /* Since the slot array is not protected by rcu, we need a mutex */
        mutex_lock(&c->mutex);
  retry:
-       list_for_each_entry(kip, &c->pages, list) {
+       rcu_read_lock();
+       list_for_each_entry_rcu(kip, &c->pages, list) {
                if (kip->nused < slots_per_page(c)) {
                        int i;
                        for (i = 0; i < slots_per_page(c); i++) {
@@ -159,6 +161,7 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
                                        kip->slot_used[i] = SLOT_USED;
                                        kip->nused++;
                                        slot = kip->insns + (i * c->insn_size);
+                                       rcu_read_unlock();
                                        goto out;
                                }
                        }
@@ -167,6 +170,7 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
                        WARN_ON(1);
                }
        }
+       rcu_read_unlock();
 
        /* If there are any garbage slots, collect it and try again. */
        if (c->nr_garbage && collect_garbage_slots(c) == 0)
@@ -193,7 +197,7 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
        kip->nused = 1;
        kip->ngarbage = 0;
        kip->cache = c;
-       list_add(&kip->list, &c->pages);
+       list_add_rcu(&kip->list, &c->pages);
        slot = kip->insns;
 out:
        mutex_unlock(&c->mutex);
@@ -213,7 +217,8 @@ static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
                 * next time somebody inserts a probe.
                 */
                if (!list_is_singular(&kip->list)) {
-                       list_del(&kip->list);
+                       list_del_rcu(&kip->list);
+                       synchronize_rcu();
                        kip->cache->free(kip->insns);
                        kfree(kip);
                }
@@ -235,8 +240,7 @@ static int collect_garbage_slots(struct kprobe_insn_cache *c)
                        continue;
                kip->ngarbage = 0;      /* we will collect all garbages */
                for (i = 0; i < slots_per_page(c); i++) {
-                       if (kip->slot_used[i] == SLOT_DIRTY &&
-                           collect_one_slot(kip, i))
+                       if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
                                break;
                }
        }
@@ -248,29 +252,60 @@ void __free_insn_slot(struct kprobe_insn_cache *c,
                      kprobe_opcode_t *slot, int dirty)
 {
        struct kprobe_insn_page *kip;
+       long idx;
 
        mutex_lock(&c->mutex);
-       list_for_each_entry(kip, &c->pages, list) {
-               long idx = ((long)slot - (long)kip->insns) /
-                               (c->insn_size * sizeof(kprobe_opcode_t));
-               if (idx >= 0 && idx < slots_per_page(c)) {
-                       WARN_ON(kip->slot_used[idx] != SLOT_USED);
-                       if (dirty) {
-                               kip->slot_used[idx] = SLOT_DIRTY;
-                               kip->ngarbage++;
-                               if (++c->nr_garbage > slots_per_page(c))
-                                       collect_garbage_slots(c);
-                       } else
-                               collect_one_slot(kip, idx);
+       rcu_read_lock();
+       list_for_each_entry_rcu(kip, &c->pages, list) {
+               idx = ((long)slot - (long)kip->insns) /
+                       (c->insn_size * sizeof(kprobe_opcode_t));
+               if (idx >= 0 && idx < slots_per_page(c))
                        goto out;
-               }
        }
-       /* Could not free this slot. */
+       /* Could not find this slot. */
        WARN_ON(1);
+       kip = NULL;
 out:
+       rcu_read_unlock();
+       /* Mark and sweep: this may sleep */
+       if (kip) {
+               /* Check double free */
+               WARN_ON(kip->slot_used[idx] != SLOT_USED);
+               if (dirty) {
+                       kip->slot_used[idx] = SLOT_DIRTY;
+                       kip->ngarbage++;
+                       if (++c->nr_garbage > slots_per_page(c))
+                               collect_garbage_slots(c);
+               } else {
+                       collect_one_slot(kip, idx);
+               }
+       }
        mutex_unlock(&c->mutex);
 }
 
+/*
+ * Check given address is on the page of kprobe instruction slots.
+ * This will be used for checking whether the address on a stack
+ * is on a text area or not.
+ */
+bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
+{
+       struct kprobe_insn_page *kip;
+       bool ret = false;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(kip, &c->pages, list) {
+               if (addr >= (unsigned long)kip->insns &&
+                   addr < (unsigned long)kip->insns + PAGE_SIZE) {
+                       ret = true;
+                       break;
+               }
+       }
+       rcu_read_unlock();
+
+       return ret;
+}
+
 #ifdef CONFIG_OPTPROBES
 /* For optimized_kprobe buffer */
 struct kprobe_insn_cache kprobe_optinsn_slots = {
index 2318fba86277180a01aa18aa853badef4c914381..8461a4372e8aab728c64b8ee752583cdac270194 100644 (file)
@@ -850,7 +850,6 @@ void __kthread_queue_delayed_work(struct kthread_worker *worker,
 
        list_add(&work->node, &worker->delayed_work_list);
        work->worker = worker;
-       timer_stats_timer_set_start_info(&dwork->timer);
        timer->expires = jiffies + delay;
        add_timer(timer);
 }
index 6f88e352cd4fbf680f10167c4fbf803cf1990747..760158d9d98d0ff50218a7e617e30171913e9e6c 100644 (file)
@@ -28,3 +28,4 @@ obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
 obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
 obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
+obj-$(CONFIG_WW_MUTEX_SELFTEST) += test-ww_mutex.o
index 7c38f8f3d97b7b172dcd61f04df382548cf7ac15..9812e5dd409e98b7cfe8133601854cae8111b772 100644 (file)
@@ -2203,7 +2203,7 @@ cache_hit:
         * Important for check_no_collision().
         */
        if (unlikely(nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)) {
-               if (debug_locks_off_graph_unlock())
+               if (!debug_locks_off_graph_unlock())
                        return 0;
 
                print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
@@ -4412,13 +4412,13 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 #endif /* #ifdef CONFIG_PROVE_RCU_REPEATEDLY */
        /* Note: the following can be executed concurrently, so be careful. */
        printk("\n");
-       printk("===============================\n");
-       printk("[ INFO: suspicious RCU usage. ]\n");
+       pr_err("===============================\n");
+       pr_err("[ ERR: suspicious RCU usage.  ]\n");
        print_kernel_ident();
-       printk("-------------------------------\n");
-       printk("%s:%d %s!\n", file, line, s);
-       printk("\nother info that might help us debug this:\n\n");
-       printk("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
+       pr_err("-------------------------------\n");
+       pr_err("%s:%d %s!\n", file, line, s);
+       pr_err("\nother info that might help us debug this:\n\n");
+       pr_err("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
               !rcu_lockdep_current_cpu_online()
                        ? "RCU used illegally from offline CPU!\n"
                        : !rcu_is_watching()
index f8c5af52a131f7eb7951f4b5edb4fd4a3224222e..28350dc8ecbb17a69c932e42b1cf370886c15e85 100644 (file)
@@ -372,6 +372,78 @@ static struct lock_torture_ops mutex_lock_ops = {
        .name           = "mutex_lock"
 };
 
+#include <linux/ww_mutex.h>
+static DEFINE_WW_CLASS(torture_ww_class);
+static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class);
+static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class);
+static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class);
+
+static int torture_ww_mutex_lock(void)
+__acquires(torture_ww_mutex_0)
+__acquires(torture_ww_mutex_1)
+__acquires(torture_ww_mutex_2)
+{
+       LIST_HEAD(list);
+       struct reorder_lock {
+               struct list_head link;
+               struct ww_mutex *lock;
+       } locks[3], *ll, *ln;
+       struct ww_acquire_ctx ctx;
+
+       locks[0].lock = &torture_ww_mutex_0;
+       list_add(&locks[0].link, &list);
+
+       locks[1].lock = &torture_ww_mutex_1;
+       list_add(&locks[1].link, &list);
+
+       locks[2].lock = &torture_ww_mutex_2;
+       list_add(&locks[2].link, &list);
+
+       ww_acquire_init(&ctx, &torture_ww_class);
+
+       list_for_each_entry(ll, &list, link) {
+               int err;
+
+               err = ww_mutex_lock(ll->lock, &ctx);
+               if (!err)
+                       continue;
+
+               ln = ll;
+               list_for_each_entry_continue_reverse(ln, &list, link)
+                       ww_mutex_unlock(ln->lock);
+
+               if (err != -EDEADLK)
+                       return err;
+
+               ww_mutex_lock_slow(ll->lock, &ctx);
+               list_move(&ll->link, &list);
+       }
+
+       ww_acquire_fini(&ctx);
+       return 0;
+}
+
+static void torture_ww_mutex_unlock(void)
+__releases(torture_ww_mutex_0)
+__releases(torture_ww_mutex_1)
+__releases(torture_ww_mutex_2)
+{
+       ww_mutex_unlock(&torture_ww_mutex_0);
+       ww_mutex_unlock(&torture_ww_mutex_1);
+       ww_mutex_unlock(&torture_ww_mutex_2);
+}
+
+static struct lock_torture_ops ww_mutex_lock_ops = {
+       .writelock      = torture_ww_mutex_lock,
+       .write_delay    = torture_mutex_delay,
+       .task_boost     = torture_boost_dummy,
+       .writeunlock    = torture_ww_mutex_unlock,
+       .readlock       = NULL,
+       .read_delay     = NULL,
+       .readunlock     = NULL,
+       .name           = "ww_mutex_lock"
+};
+
 #ifdef CONFIG_RT_MUTEXES
 static DEFINE_RT_MUTEX(torture_rtmutex);
 
@@ -780,6 +852,10 @@ static void lock_torture_cleanup(void)
        else
                lock_torture_print_module_parms(cxt.cur_ops,
                                                "End of test: SUCCESS");
+
+       kfree(cxt.lwsa);
+       kfree(cxt.lrsa);
+
 end:
        torture_cleanup_end();
 }
@@ -793,6 +869,7 @@ static int __init lock_torture_init(void)
                &spin_lock_ops, &spin_lock_irq_ops,
                &rw_lock_ops, &rw_lock_irq_ops,
                &mutex_lock_ops,
+               &ww_mutex_lock_ops,
 #ifdef CONFIG_RT_MUTEXES
                &rtmutex_lock_ops,
 #endif
@@ -924,6 +1001,8 @@ static int __init lock_torture_init(void)
                                       GFP_KERNEL);
                if (reader_tasks == NULL) {
                        VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
+                       kfree(writer_tasks);
+                       writer_tasks = NULL;
                        firsterr = -ENOMEM;
                        goto unwind;
                }
index a459faa4898738e1121c19509a735fce072418e4..4174417d53094d55c3e89ef08e6f360c6b41557f 100644 (file)
@@ -26,20 +26,3 @@ extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 extern void debug_mutex_unlock(struct mutex *lock);
 extern void debug_mutex_init(struct mutex *lock, const char *name,
                             struct lock_class_key *key);
-
-#define spin_lock_mutex(lock, flags)                   \
-       do {                                            \
-               struct mutex *l = container_of(lock, struct mutex, wait_lock); \
-                                                       \
-               DEBUG_LOCKS_WARN_ON(in_interrupt());    \
-               local_irq_save(flags);                  \
-               arch_spin_lock(&(lock)->rlock.raw_lock);\
-               DEBUG_LOCKS_WARN_ON(l->magic != l);     \
-       } while (0)
-
-#define spin_unlock_mutex(lock, flags)                         \
-       do {                                                    \
-               arch_spin_unlock(&(lock)->rlock.raw_lock);      \
-               local_irq_restore(flags);                       \
-               preempt_check_resched();                        \
-       } while (0)
index 9b349619f431443479fdbe42fe162715d1e4e3fc..ad2d9e22697b92125a643efd049bd2d3e7a54352 100644 (file)
@@ -50,16 +50,17 @@ EXPORT_SYMBOL(__mutex_init);
 /*
  * @owner: contains: 'struct task_struct *' to the current lock owner,
  * NULL means not owned. Since task_struct pointers are aligned at
- * ARCH_MIN_TASKALIGN (which is at least sizeof(void *)), we have low
- * bits to store extra state.
+ * at least L1_CACHE_BYTES, we have low bits to store extra state.
  *
  * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
  * Bit1 indicates unlock needs to hand the lock to the top-waiter
+ * Bit2 indicates handoff has been done and we're waiting for pickup.
  */
 #define MUTEX_FLAG_WAITERS     0x01
 #define MUTEX_FLAG_HANDOFF     0x02
+#define MUTEX_FLAG_PICKUP      0x04
 
-#define MUTEX_FLAGS            0x03
+#define MUTEX_FLAGS            0x07
 
 static inline struct task_struct *__owner_task(unsigned long owner)
 {
@@ -72,38 +73,29 @@ static inline unsigned long __owner_flags(unsigned long owner)
 }
 
 /*
- * Actual trylock that will work on any unlocked state.
- *
- * When setting the owner field, we must preserve the low flag bits.
- *
- * Be careful with @handoff, only set that in a wait-loop (where you set
- * HANDOFF) to avoid recursive lock attempts.
+ * Trylock variant that returns the owning task on failure.
  */
-static inline bool __mutex_trylock(struct mutex *lock, const bool handoff)
+static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
 {
        unsigned long owner, curr = (unsigned long)current;
 
        owner = atomic_long_read(&lock->owner);
        for (;;) { /* must loop, can race against a flag */
                unsigned long old, flags = __owner_flags(owner);
+               unsigned long task = owner & ~MUTEX_FLAGS;
 
-               if (__owner_task(owner)) {
-                       if (handoff && unlikely(__owner_task(owner) == current)) {
-                               /*
-                                * Provide ACQUIRE semantics for the lock-handoff.
-                                *
-                                * We cannot easily use load-acquire here, since
-                                * the actual load is a failed cmpxchg, which
-                                * doesn't imply any barriers.
-                                *
-                                * Also, this is a fairly unlikely scenario, and
-                                * this contains the cost.
-                                */
-                               smp_mb(); /* ACQUIRE */
-                               return true;
-                       }
+               if (task) {
+                       if (likely(task != curr))
+                               break;
 
-                       return false;
+                       if (likely(!(flags & MUTEX_FLAG_PICKUP)))
+                               break;
+
+                       flags &= ~MUTEX_FLAG_PICKUP;
+               } else {
+#ifdef CONFIG_DEBUG_MUTEXES
+                       DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
+#endif
                }
 
                /*
@@ -111,15 +103,24 @@ static inline bool __mutex_trylock(struct mutex *lock, const bool handoff)
                 * past the point where we acquire it. This would be possible
                 * if we (accidentally) set the bit on an unlocked mutex.
                 */
-               if (handoff)
-                       flags &= ~MUTEX_FLAG_HANDOFF;
+               flags &= ~MUTEX_FLAG_HANDOFF;
 
                old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
                if (old == owner)
-                       return true;
+                       return NULL;
 
                owner = old;
        }
+
+       return __owner_task(owner);
+}
+
+/*
+ * Actual trylock that will work on any unlocked state.
+ */
+static inline bool __mutex_trylock(struct mutex *lock)
+{
+       return !__mutex_trylock_or_owner(lock);
 }
 
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
@@ -171,9 +172,9 @@ static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_wait
 
 /*
  * Give up ownership to a specific task, when @task = NULL, this is equivalent
- * to a regular unlock. Clears HANDOFF, preserves WAITERS. Provides RELEASE
- * semantics like a regular unlock, the __mutex_trylock() provides matching
- * ACQUIRE semantics for the handoff.
+ * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
+ * WAITERS. Provides RELEASE semantics like a regular unlock, the
+ * __mutex_trylock() provides a matching ACQUIRE semantics for the handoff.
  */
 static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
 {
@@ -184,10 +185,13 @@ static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
 
 #ifdef CONFIG_DEBUG_MUTEXES
                DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
+               DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
 #endif
 
                new = (owner & MUTEX_FLAG_WAITERS);
                new |= (unsigned long)task;
+               if (task)
+                       new |= MUTEX_FLAG_PICKUP;
 
                old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
                if (old == owner)
@@ -237,8 +241,8 @@ void __sched mutex_lock(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock);
 #endif
 
-static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
-                                                  struct ww_acquire_ctx *ww_ctx)
+static __always_inline void
+ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
 {
 #ifdef CONFIG_DEBUG_MUTEXES
        /*
@@ -277,17 +281,50 @@ static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
        ww_ctx->acquired++;
 }
 
+static inline bool __sched
+__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
+{
+       return a->stamp - b->stamp <= LONG_MAX &&
+              (a->stamp != b->stamp || a > b);
+}
+
+/*
+ * Wake up any waiters that may have to back off when the lock is held by the
+ * given context.
+ *
+ * Due to the invariants on the wait list, this can only affect the first
+ * waiter with a context.
+ *
+ * The current task must not be on the wait list.
+ */
+static void __sched
+__ww_mutex_wakeup_for_backoff(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
+{
+       struct mutex_waiter *cur;
+
+       lockdep_assert_held(&lock->wait_lock);
+
+       list_for_each_entry(cur, &lock->wait_list, list) {
+               if (!cur->ww_ctx)
+                       continue;
+
+               if (cur->ww_ctx->acquired > 0 &&
+                   __ww_ctx_stamp_after(cur->ww_ctx, ww_ctx)) {
+                       debug_mutex_wake_waiter(lock, cur);
+                       wake_up_process(cur->task);
+               }
+
+               break;
+       }
+}
+
 /*
  * After acquiring lock with fastpath or when we lost out in contested
  * slowpath, set ctx and wake up any waiters so they can recheck.
  */
 static __always_inline void
-ww_mutex_set_context_fastpath(struct ww_mutex *lock,
-                              struct ww_acquire_ctx *ctx)
+ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
-       unsigned long flags;
-       struct mutex_waiter *cur;
-
        ww_mutex_lock_acquired(lock, ctx);
 
        lock->ctx = ctx;
@@ -311,46 +348,79 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
         * Uh oh, we raced in fastpath, wake up everyone in this case,
         * so they can see the new lock->ctx.
         */
-       spin_lock_mutex(&lock->base.wait_lock, flags);
-       list_for_each_entry(cur, &lock->base.wait_list, list) {
-               debug_mutex_wake_waiter(&lock->base, cur);
-               wake_up_process(cur->task);
-       }
-       spin_unlock_mutex(&lock->base.wait_lock, flags);
+       spin_lock(&lock->base.wait_lock);
+       __ww_mutex_wakeup_for_backoff(&lock->base, ctx);
+       spin_unlock(&lock->base.wait_lock);
 }
 
 /*
- * After acquiring lock in the slowpath set ctx and wake up any
- * waiters so they can recheck.
+ * After acquiring lock in the slowpath set ctx.
+ *
+ * Unlike for the fast path, the caller ensures that waiters are woken up where
+ * necessary.
  *
  * Callers must hold the mutex wait_lock.
  */
 static __always_inline void
-ww_mutex_set_context_slowpath(struct ww_mutex *lock,
-                             struct ww_acquire_ctx *ctx)
+ww_mutex_set_context_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
-       struct mutex_waiter *cur;
-
        ww_mutex_lock_acquired(lock, ctx);
        lock->ctx = ctx;
+}
+
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
+
+static inline
+bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
+                           struct mutex_waiter *waiter)
+{
+       struct ww_mutex *ww;
+
+       ww = container_of(lock, struct ww_mutex, base);
 
        /*
-        * Give any possible sleeping processes the chance to wake up,
-        * so they can recheck if they have to back off.
+        * If ww->ctx is set the contents are undefined, only
+        * by acquiring wait_lock there is a guarantee that
+        * they are not invalid when reading.
+        *
+        * As such, when deadlock detection needs to be
+        * performed the optimistic spinning cannot be done.
+        *
+        * Check this in every inner iteration because we may
+        * be racing against another thread's ww_mutex_lock.
         */
-       list_for_each_entry(cur, &lock->base.wait_list, list) {
-               debug_mutex_wake_waiter(&lock->base, cur);
-               wake_up_process(cur->task);
-       }
+       if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
+               return false;
+
+       /*
+        * If we aren't on the wait list yet, cancel the spin
+        * if there are waiters. We want to avoid stealing the
+        * lock from a waiter with an earlier stamp, since the
+        * other thread may already own a lock that we also
+        * need.
+        */
+       if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
+               return false;
+
+       /*
+        * Similarly, stop spinning if we are no longer the
+        * first waiter.
+        */
+       if (waiter && !__mutex_waiter_is_first(lock, waiter))
+               return false;
+
+       return true;
 }
 
-#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 /*
- * Look out! "owner" is an entirely speculative pointer
- * access and not reliable.
+ * Look out! "owner" is an entirely speculative pointer access and not
+ * reliable.
+ *
+ * "noinline" so that this function shows up on perf profiles.
  */
 static noinline
-bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
+bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
+                        struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
 {
        bool ret = true;
 
@@ -373,6 +443,11 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
                        break;
                }
 
+               if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
+                       ret = false;
+                       break;
+               }
+
                cpu_relax();
        }
        rcu_read_unlock();
@@ -431,12 +506,10 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
  * with the spinner at the head of the OSQ, if present, until the owner is
  * changed to itself.
  */
-static bool mutex_optimistic_spin(struct mutex *lock,
-                                 struct ww_acquire_ctx *ww_ctx,
-                                 const bool use_ww_ctx, const bool waiter)
+static __always_inline bool
+mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
+                     const bool use_ww_ctx, struct mutex_waiter *waiter)
 {
-       struct task_struct *task = current;
-
        if (!waiter) {
                /*
                 * The purpose of the mutex_can_spin_on_owner() function is
@@ -460,40 +533,17 @@ static bool mutex_optimistic_spin(struct mutex *lock,
        for (;;) {
                struct task_struct *owner;
 
-               if (use_ww_ctx && ww_ctx->acquired > 0) {
-                       struct ww_mutex *ww;
-
-                       ww = container_of(lock, struct ww_mutex, base);
-                       /*
-                        * If ww->ctx is set the contents are undefined, only
-                        * by acquiring wait_lock there is a guarantee that
-                        * they are not invalid when reading.
-                        *
-                        * As such, when deadlock detection needs to be
-                        * performed the optimistic spinning cannot be done.
-                        */
-                       if (READ_ONCE(ww->ctx))
-                               goto fail_unlock;
-               }
+               /* Try to acquire the mutex... */
+               owner = __mutex_trylock_or_owner(lock);
+               if (!owner)
+                       break;
 
                /*
-                * If there's an owner, wait for it to either
+                * There's an owner, wait for it to either
                 * release the lock or go to sleep.
                 */
-               owner = __mutex_owner(lock);
-               if (owner) {
-                       if (waiter && owner == task) {
-                               smp_mb(); /* ACQUIRE */
-                               break;
-                       }
-
-                       if (!mutex_spin_on_owner(lock, owner))
-                               goto fail_unlock;
-               }
-
-               /* Try to acquire the mutex if it is unlocked. */
-               if (__mutex_trylock(lock, waiter))
-                       break;
+               if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
+                       goto fail_unlock;
 
                /*
                 * The cpu_relax() call is a compiler barrier which forces
@@ -532,9 +582,9 @@ fail:
        return false;
 }
 #else
-static bool mutex_optimistic_spin(struct mutex *lock,
-                                 struct ww_acquire_ctx *ww_ctx,
-                                 const bool use_ww_ctx, const bool waiter)
+static __always_inline bool
+mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
+                     const bool use_ww_ctx, struct mutex_waiter *waiter)
 {
        return false;
 }
@@ -594,23 +644,88 @@ void __sched ww_mutex_unlock(struct ww_mutex *lock)
 EXPORT_SYMBOL(ww_mutex_unlock);
 
 static inline int __sched
-__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
+__ww_mutex_lock_check_stamp(struct mutex *lock, struct mutex_waiter *waiter,
+                           struct ww_acquire_ctx *ctx)
 {
        struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
        struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
+       struct mutex_waiter *cur;
+
+       if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
+               goto deadlock;
+
+       /*
+        * If there is a waiter in front of us that has a context, then its
+        * stamp is earlier than ours and we must back off.
+        */
+       cur = waiter;
+       list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
+               if (cur->ww_ctx)
+                       goto deadlock;
+       }
+
+       return 0;
 
-       if (!hold_ctx)
+deadlock:
+#ifdef CONFIG_DEBUG_MUTEXES
+       DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
+       ctx->contending_lock = ww;
+#endif
+       return -EDEADLK;
+}
+
+static inline int __sched
+__ww_mutex_add_waiter(struct mutex_waiter *waiter,
+                     struct mutex *lock,
+                     struct ww_acquire_ctx *ww_ctx)
+{
+       struct mutex_waiter *cur;
+       struct list_head *pos;
+
+       if (!ww_ctx) {
+               list_add_tail(&waiter->list, &lock->wait_list);
                return 0;
+       }
 
-       if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
-           (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
+       /*
+        * Add the waiter before the first waiter with a higher stamp.
+        * Waiters without a context are skipped to avoid starving
+        * them.
+        */
+       pos = &lock->wait_list;
+       list_for_each_entry_reverse(cur, &lock->wait_list, list) {
+               if (!cur->ww_ctx)
+                       continue;
+
+               if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
+                       /* Back off immediately if necessary. */
+                       if (ww_ctx->acquired > 0) {
 #ifdef CONFIG_DEBUG_MUTEXES
-               DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
-               ctx->contending_lock = ww;
+                               struct ww_mutex *ww;
+
+                               ww = container_of(lock, struct ww_mutex, base);
+                               DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
+                               ww_ctx->contending_lock = ww;
 #endif
-               return -EDEADLK;
+                               return -EDEADLK;
+                       }
+
+                       break;
+               }
+
+               pos = &cur->list;
+
+               /*
+                * Wake up the waiter so that it gets a chance to back
+                * off.
+                */
+               if (cur->ww_ctx->acquired > 0) {
+                       debug_mutex_wake_waiter(lock, cur);
+                       wake_up_process(cur->task);
+               }
        }
 
+       list_add_tail(&waiter->list, pos);
        return 0;
 }
 
@@ -622,15 +737,15 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                    struct lockdep_map *nest_lock, unsigned long ip,
                    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
 {
-       struct task_struct *task = current;
        struct mutex_waiter waiter;
-       unsigned long flags;
        bool first = false;
        struct ww_mutex *ww;
        int ret;
 
-       if (use_ww_ctx) {
-               ww = container_of(lock, struct ww_mutex, base);
+       might_sleep();
+
+       ww = container_of(lock, struct ww_mutex, base);
+       if (use_ww_ctx && ww_ctx) {
                if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
                        return -EALREADY;
        }
@@ -638,36 +753,54 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
        preempt_disable();
        mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
 
-       if (__mutex_trylock(lock, false) ||
-           mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, false)) {
+       if (__mutex_trylock(lock) ||
+           mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
                /* got the lock, yay! */
                lock_acquired(&lock->dep_map, ip);
-               if (use_ww_ctx)
+               if (use_ww_ctx && ww_ctx)
                        ww_mutex_set_context_fastpath(ww, ww_ctx);
                preempt_enable();
                return 0;
        }
 
-       spin_lock_mutex(&lock->wait_lock, flags);
+       spin_lock(&lock->wait_lock);
        /*
         * After waiting to acquire the wait_lock, try again.
         */
-       if (__mutex_trylock(lock, false))
+       if (__mutex_trylock(lock)) {
+               if (use_ww_ctx && ww_ctx)
+                       __ww_mutex_wakeup_for_backoff(lock, ww_ctx);
+
                goto skip_wait;
+       }
 
        debug_mutex_lock_common(lock, &waiter);
-       debug_mutex_add_waiter(lock, &waiter, task);
+       debug_mutex_add_waiter(lock, &waiter, current);
+
+       lock_contended(&lock->dep_map, ip);
 
-       /* add waiting tasks to the end of the waitqueue (FIFO): */
-       list_add_tail(&waiter.list, &lock->wait_list);
-       waiter.task = task;
+       if (!use_ww_ctx) {
+               /* add waiting tasks to the end of the waitqueue (FIFO): */
+               list_add_tail(&waiter.list, &lock->wait_list);
+
+#ifdef CONFIG_DEBUG_MUTEXES
+               waiter.ww_ctx = MUTEX_POISON_WW_CTX;
+#endif
+       } else {
+               /* Add in stamp order, waking up waiters that must back off. */
+               ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
+               if (ret)
+                       goto err_early_backoff;
+
+               waiter.ww_ctx = ww_ctx;
+       }
+
+       waiter.task = current;
 
        if (__mutex_waiter_is_first(lock, &waiter))
                __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
 
-       lock_contended(&lock->dep_map, ip);
-
-       set_task_state(task, state);
+       set_current_state(state);
        for (;;) {
                /*
                 * Once we hold wait_lock, we're serialized against
@@ -675,7 +808,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                 * before testing the error conditions to make sure we pick up
                 * the handoff.
                 */
-               if (__mutex_trylock(lock, first))
+               if (__mutex_trylock(lock))
                        goto acquired;
 
                /*
@@ -683,42 +816,47 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                 * wait_lock. This ensures the lock cancellation is ordered
                 * against mutex_unlock() and wake-ups do not go missing.
                 */
-               if (unlikely(signal_pending_state(state, task))) {
+               if (unlikely(signal_pending_state(state, current))) {
                        ret = -EINTR;
                        goto err;
                }
 
-               if (use_ww_ctx && ww_ctx->acquired > 0) {
-                       ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
+               if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
+                       ret = __ww_mutex_lock_check_stamp(lock, &waiter, ww_ctx);
                        if (ret)
                                goto err;
                }
 
-               spin_unlock_mutex(&lock->wait_lock, flags);
+               spin_unlock(&lock->wait_lock);
                schedule_preempt_disabled();
 
-               if (!first && __mutex_waiter_is_first(lock, &waiter)) {
-                       first = true;
-                       __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
+               /*
+                * ww_mutex needs to always recheck its position since its waiter
+                * list is not FIFO ordered.
+                */
+               if ((use_ww_ctx && ww_ctx) || !first) {
+                       first = __mutex_waiter_is_first(lock, &waiter);
+                       if (first)
+                               __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
                }
 
-               set_task_state(task, state);
+               set_current_state(state);
                /*
                 * Here we order against unlock; we must either see it change
                 * state back to RUNNING and fall through the next schedule(),
                 * or we must see its unlock and acquire.
                 */
-               if ((first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, true)) ||
-                    __mutex_trylock(lock, first))
+               if (__mutex_trylock(lock) ||
+                   (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
                        break;
 
-               spin_lock_mutex(&lock->wait_lock, flags);
+               spin_lock(&lock->wait_lock);
        }
-       spin_lock_mutex(&lock->wait_lock, flags);
+       spin_lock(&lock->wait_lock);
 acquired:
-       __set_task_state(task, TASK_RUNNING);
+       __set_current_state(TASK_RUNNING);
 
-       mutex_remove_waiter(lock, &waiter, task);
+       mutex_remove_waiter(lock, &waiter, current);
        if (likely(list_empty(&lock->wait_list)))
                __mutex_clear_flag(lock, MUTEX_FLAGS);
 
@@ -728,30 +866,44 @@ skip_wait:
        /* got the lock - cleanup and rejoice! */
        lock_acquired(&lock->dep_map, ip);
 
-       if (use_ww_ctx)
+       if (use_ww_ctx && ww_ctx)
                ww_mutex_set_context_slowpath(ww, ww_ctx);
 
-       spin_unlock_mutex(&lock->wait_lock, flags);
+       spin_unlock(&lock->wait_lock);
        preempt_enable();
        return 0;
 
 err:
-       __set_task_state(task, TASK_RUNNING);
-       mutex_remove_waiter(lock, &waiter, task);
-       spin_unlock_mutex(&lock->wait_lock, flags);
+       __set_current_state(TASK_RUNNING);
+       mutex_remove_waiter(lock, &waiter, current);
+err_early_backoff:
+       spin_unlock(&lock->wait_lock);
        debug_mutex_free_waiter(&waiter);
        mutex_release(&lock->dep_map, 1, ip);
        preempt_enable();
        return ret;
 }
 
+static int __sched
+__mutex_lock(struct mutex *lock, long state, unsigned int subclass,
+            struct lockdep_map *nest_lock, unsigned long ip)
+{
+       return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
+}
+
+static int __sched
+__ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
+               struct lockdep_map *nest_lock, unsigned long ip,
+               struct ww_acquire_ctx *ww_ctx)
+{
+       return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
+}
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void __sched
 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 {
-       might_sleep();
-       __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
-                           subclass, NULL, _RET_IP_, NULL, 0);
+       __mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
@@ -759,30 +911,38 @@ EXPORT_SYMBOL_GPL(mutex_lock_nested);
 void __sched
 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
 {
-       might_sleep();
-       __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
-                           0, nest, _RET_IP_, NULL, 0);
+       __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
 }
 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
 
 int __sched
 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
 {
-       might_sleep();
-       return __mutex_lock_common(lock, TASK_KILLABLE,
-                                  subclass, NULL, _RET_IP_, NULL, 0);
+       return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
 }
 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
 
 int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
-       might_sleep();
-       return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
-                                  subclass, NULL, _RET_IP_, NULL, 0);
+       return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
 }
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
 
+void __sched
+mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
+{
+       int token;
+
+       might_sleep();
+
+       token = io_schedule_prepare();
+       __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
+                           subclass, NULL, _RET_IP_, NULL, 0);
+       io_schedule_finish(token);
+}
+EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
+
 static inline int
 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
@@ -810,35 +970,37 @@ ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 }
 
 int __sched
-__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
        int ret;
 
        might_sleep();
-       ret =  __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
-                                  0, &ctx->dep_map, _RET_IP_, ctx, 1);
-       if (!ret && ctx->acquired > 1)
+       ret =  __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
+                              0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
+                              ctx);
+       if (!ret && ctx && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(__ww_mutex_lock);
+EXPORT_SYMBOL_GPL(ww_mutex_lock);
 
 int __sched
-__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
        int ret;
 
        might_sleep();
-       ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
-                                 0, &ctx->dep_map, _RET_IP_, ctx, 1);
+       ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
+                             0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
+                             ctx);
 
-       if (!ret && ctx->acquired > 1)
+       if (!ret && ctx && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
+EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
 
 #endif
 
@@ -848,8 +1010,8 @@ EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
 {
        struct task_struct *next = NULL;
-       unsigned long owner, flags;
        DEFINE_WAKE_Q(wake_q);
+       unsigned long owner;
 
        mutex_release(&lock->dep_map, 1, ip);
 
@@ -866,6 +1028,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
 
 #ifdef CONFIG_DEBUG_MUTEXES
                DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
+               DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
 #endif
 
                if (owner & MUTEX_FLAG_HANDOFF)
@@ -883,7 +1046,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
                owner = old;
        }
 
-       spin_lock_mutex(&lock->wait_lock, flags);
+       spin_lock(&lock->wait_lock);
        debug_mutex_unlock(lock);
        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
@@ -900,7 +1063,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
        if (owner & MUTEX_FLAG_HANDOFF)
                __mutex_handoff(lock, next);
 
-       spin_unlock_mutex(&lock->wait_lock, flags);
+       spin_unlock(&lock->wait_lock);
 
        wake_up_q(&wake_q);
 }
@@ -950,40 +1113,47 @@ int __sched mutex_lock_killable(struct mutex *lock)
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
+void __sched mutex_lock_io(struct mutex *lock)
+{
+       int token;
+
+       token = io_schedule_prepare();
+       mutex_lock(lock);
+       io_schedule_finish(token);
+}
+EXPORT_SYMBOL_GPL(mutex_lock_io);
+
 static noinline void __sched
 __mutex_lock_slowpath(struct mutex *lock)
 {
-       __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
-                           NULL, _RET_IP_, NULL, 0);
+       __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
 }
 
 static noinline int __sched
 __mutex_lock_killable_slowpath(struct mutex *lock)
 {
-       return __mutex_lock_common(lock, TASK_KILLABLE, 0,
-                                  NULL, _RET_IP_, NULL, 0);
+       return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
 }
 
 static noinline int __sched
 __mutex_lock_interruptible_slowpath(struct mutex *lock)
 {
-       return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
-                                  NULL, _RET_IP_, NULL, 0);
+       return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
 }
 
 static noinline int __sched
 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
-       return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
-                                  NULL, _RET_IP_, ctx, 1);
+       return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
+                              _RET_IP_, ctx);
 }
 
 static noinline int __sched
 __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
                                            struct ww_acquire_ctx *ctx)
 {
-       return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
-                                  NULL, _RET_IP_, ctx, 1);
+       return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
+                              _RET_IP_, ctx);
 }
 
 #endif
@@ -1004,7 +1174,7 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
  */
 int __sched mutex_trylock(struct mutex *lock)
 {
-       bool locked = __mutex_trylock(lock, false);
+       bool locked = __mutex_trylock(lock);
 
        if (locked)
                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
@@ -1015,32 +1185,34 @@ EXPORT_SYMBOL(mutex_trylock);
 
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
 int __sched
-__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
        might_sleep();
 
        if (__mutex_trylock_fast(&lock->base)) {
-               ww_mutex_set_context_fastpath(lock, ctx);
+               if (ctx)
+                       ww_mutex_set_context_fastpath(lock, ctx);
                return 0;
        }
 
        return __ww_mutex_lock_slowpath(lock, ctx);
 }
-EXPORT_SYMBOL(__ww_mutex_lock);
+EXPORT_SYMBOL(ww_mutex_lock);
 
 int __sched
-__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
        might_sleep();
 
        if (__mutex_trylock_fast(&lock->base)) {
-               ww_mutex_set_context_fastpath(lock, ctx);
+               if (ctx)
+                       ww_mutex_set_context_fastpath(lock, ctx);
                return 0;
        }
 
        return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
 }
-EXPORT_SYMBOL(__ww_mutex_lock_interruptible);
+EXPORT_SYMBOL(ww_mutex_lock_interruptible);
 
 #endif
 
index 4410a4af42a338d5a826fb20c67cc4eba31034f8..6ebc1902f779fe6d89cc7fe4fe1f78c10610eed6 100644 (file)
@@ -9,10 +9,6 @@
  * !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
  */
 
-#define spin_lock_mutex(lock, flags) \
-               do { spin_lock(lock); (void)(flags); } while (0)
-#define spin_unlock_mutex(lock, flags) \
-               do { spin_unlock(lock); (void)(flags); } while (0)
 #define mutex_remove_waiter(lock, waiter, task) \
                __list_del((waiter)->list.prev, (waiter)->list.next)
 
index ce182599cf2e98b51831adbf5dca6ce545df0d7f..883cf1b92d9084f30a21f699211d6cd2ca3b9362 100644 (file)
@@ -1,7 +1,6 @@
 #include <linux/atomic.h>
 #include <linux/rwsem.h>
 #include <linux/percpu.h>
-#include <linux/wait.h>
 #include <linux/lockdep.h>
 #include <linux/percpu-rwsem.h>
 #include <linux/rcupdate.h>
@@ -18,7 +17,7 @@ int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
        /* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
        rcu_sync_init(&sem->rss, RCU_SCHED_SYNC);
        __init_rwsem(&sem->rw_sem, name, rwsem_key);
-       init_waitqueue_head(&sem->writer);
+       rcuwait_init(&sem->writer);
        sem->readers_block = 0;
        return 0;
 }
@@ -103,7 +102,7 @@ void __percpu_up_read(struct percpu_rw_semaphore *sem)
        __this_cpu_dec(*sem->read_count);
 
        /* Prod writer to recheck readers_active */
-       wake_up(&sem->writer);
+       rcuwait_wake_up(&sem->writer);
 }
 EXPORT_SYMBOL_GPL(__percpu_up_read);
 
@@ -160,7 +159,7 @@ void percpu_down_write(struct percpu_rw_semaphore *sem)
         */
 
        /* Wait for all now active readers to complete. */
-       wait_event(sem->writer, readers_active_check(sem));
+       rcuwait_wait_event(&sem->writer, readers_active_check(sem));
 }
 EXPORT_SYMBOL_GPL(percpu_down_write);
 
index e3b5520005db7fa5521510687b432f727c6715c3..e6b2f7ad3e51d4fb901be3e1d02dcea84aca0b2a 100644 (file)
@@ -263,7 +263,7 @@ pv_wait_early(struct pv_node *prev, int loop)
        if ((loop & PV_PREV_CHECK_MASK) != 0)
                return false;
 
-       return READ_ONCE(prev->state) != vcpu_running;
+       return READ_ONCE(prev->state) != vcpu_running || vcpu_is_preempted(prev->cpu);
 }
 
 /*
index 2f443ed2320a7b2d0251f6d40f04732e89c6464f..d340be3a488f7afa383c8d7467f786e5f241dcc7 100644 (file)
@@ -1179,7 +1179,7 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
                 * TASK_INTERRUPTIBLE checks for signals and
                 * timeout. Ignored otherwise.
                 */
-               if (unlikely(state == TASK_INTERRUPTIBLE)) {
+               if (likely(state == TASK_INTERRUPTIBLE)) {
                        /* Signal pending? */
                        if (signal_pending(current))
                                ret = -EINTR;
index 1591f6b3539fd5120dc6884e5ab392572398ecc1..5eacab880f672c40859709a0021c50e68e57f15f 100644 (file)
@@ -128,7 +128,6 @@ __rwsem_wake_one_writer(struct rw_semaphore *sem)
 void __sched __down_read(struct rw_semaphore *sem)
 {
        struct rwsem_waiter waiter;
-       struct task_struct *tsk;
        unsigned long flags;
 
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
@@ -140,13 +139,12 @@ void __sched __down_read(struct rw_semaphore *sem)
                goto out;
        }
 
-       tsk = current;
-       set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+       set_current_state(TASK_UNINTERRUPTIBLE);
 
        /* set up my own style of waitqueue */
-       waiter.task = tsk;
+       waiter.task = current;
        waiter.type = RWSEM_WAITING_FOR_READ;
-       get_task_struct(tsk);
+       get_task_struct(current);
 
        list_add_tail(&waiter.list, &sem->wait_list);
 
@@ -158,10 +156,10 @@ void __sched __down_read(struct rw_semaphore *sem)
                if (!waiter.task)
                        break;
                schedule();
-               set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+               set_current_state(TASK_UNINTERRUPTIBLE);
        }
 
-       __set_task_state(tsk, TASK_RUNNING);
+       __set_current_state(TASK_RUNNING);
  out:
        ;
 }
@@ -194,15 +192,13 @@ int __down_read_trylock(struct rw_semaphore *sem)
 int __sched __down_write_common(struct rw_semaphore *sem, int state)
 {
        struct rwsem_waiter waiter;
-       struct task_struct *tsk;
        unsigned long flags;
        int ret = 0;
 
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
        /* set up my own style of waitqueue */
-       tsk = current;
-       waiter.task = tsk;
+       waiter.task = current;
        waiter.type = RWSEM_WAITING_FOR_WRITE;
        list_add_tail(&waiter.list, &sem->wait_list);
 
@@ -220,7 +216,7 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state)
                        ret = -EINTR;
                        goto out;
                }
-               set_task_state(tsk, state);
+               set_current_state(state);
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
                schedule();
                raw_spin_lock_irqsave(&sem->wait_lock, flags);
index 631506004f9e04fa3b0b03a17b3d2f9909c7fe61..2ad8d8dc3bb19db644d0ee8f8301d61b71d0542d 100644 (file)
@@ -224,10 +224,9 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
 {
        long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
        struct rwsem_waiter waiter;
-       struct task_struct *tsk = current;
        DEFINE_WAKE_Q(wake_q);
 
-       waiter.task = tsk;
+       waiter.task = current;
        waiter.type = RWSEM_WAITING_FOR_READ;
 
        raw_spin_lock_irq(&sem->wait_lock);
@@ -254,13 +253,13 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
 
        /* wait to be given the lock */
        while (true) {
-               set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+               set_current_state(TASK_UNINTERRUPTIBLE);
                if (!waiter.task)
                        break;
                schedule();
        }
 
-       __set_task_state(tsk, TASK_RUNNING);
+       __set_current_state(TASK_RUNNING);
        return sem;
 }
 EXPORT_SYMBOL(rwsem_down_read_failed);
@@ -503,8 +502,6 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
                 * wake any read locks that were queued ahead of us.
                 */
                if (count > RWSEM_WAITING_BIAS) {
-                       DEFINE_WAKE_Q(wake_q);
-
                        __rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
                        /*
                         * The wakeup is normally called _after_ the wait_lock
@@ -514,6 +511,11 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
                         * for attempting rwsem_try_write_lock().
                         */
                        wake_up_q(&wake_q);
+
+                       /*
+                        * Reinitialize wake_q after use.
+                        */
+                       wake_q_init(&wake_q);
                }
 
        } else
index b8120abe594b893e164791afb0e4527d6ed3886c..9512e37637dc709318f29f87761a5e1b5174e853 100644 (file)
@@ -204,19 +204,18 @@ struct semaphore_waiter {
 static inline int __sched __down_common(struct semaphore *sem, long state,
                                                                long timeout)
 {
-       struct task_struct *task = current;
        struct semaphore_waiter waiter;
 
        list_add_tail(&waiter.list, &sem->wait_list);
-       waiter.task = task;
+       waiter.task = current;
        waiter.up = false;
 
        for (;;) {
-               if (signal_pending_state(state, task))
+               if (signal_pending_state(state, current))
                        goto interrupted;
                if (unlikely(timeout <= 0))
                        goto timed_out;
-               __set_task_state(task, state);
+               __set_current_state(state);
                raw_spin_unlock_irq(&sem->lock);
                timeout = schedule_timeout(timeout);
                raw_spin_lock_irq(&sem->lock);
index db3ccb1dd614800e006c0a8cfd99b9c40aae124a..4b082b5cac9eedfd7c31daef7e8dd02974eedbed 100644 (file)
@@ -363,14 +363,6 @@ void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
 }
 EXPORT_SYMBOL(_raw_spin_lock_nested);
 
-void __lockfunc _raw_spin_lock_bh_nested(raw_spinlock_t *lock, int subclass)
-{
-       __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
-       spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-       LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
-}
-EXPORT_SYMBOL(_raw_spin_lock_bh_nested);
-
 unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
                                                   int subclass)
 {
index 0374a596cffac8439d36875ba2bd248753caf886..9aa0fccd5d432d7383bbf186d0cd7fc42893108d 100644 (file)
@@ -103,38 +103,14 @@ static inline void debug_spin_unlock(raw_spinlock_t *lock)
        lock->owner_cpu = -1;
 }
 
-static void __spin_lock_debug(raw_spinlock_t *lock)
-{
-       u64 i;
-       u64 loops = loops_per_jiffy * HZ;
-
-       for (i = 0; i < loops; i++) {
-               if (arch_spin_trylock(&lock->raw_lock))
-                       return;
-               __delay(1);
-       }
-       /* lockup suspected: */
-       spin_dump(lock, "lockup suspected");
-#ifdef CONFIG_SMP
-       trigger_all_cpu_backtrace();
-#endif
-
-       /*
-        * The trylock above was causing a livelock.  Give the lower level arch
-        * specific lock code a chance to acquire the lock. We have already
-        * printed a warning/backtrace at this point. The non-debug arch
-        * specific code might actually succeed in acquiring the lock.  If it is
-        * not successful, the end-result is the same - there is no forward
-        * progress.
-        */
-       arch_spin_lock(&lock->raw_lock);
-}
-
+/*
+ * We are now relying on the NMI watchdog to detect lockup instead of doing
+ * the detection here with an unfair lock which can cause problem of its own.
+ */
 void do_raw_spin_lock(raw_spinlock_t *lock)
 {
        debug_spin_lock_before(lock);
-       if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
-               __spin_lock_debug(lock);
+       arch_spin_lock(&lock->raw_lock);
        debug_spin_lock_after(lock);
 }
 
@@ -172,32 +148,6 @@ static void rwlock_bug(rwlock_t *lock, const char *msg)
 
 #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
 
-#if 0          /* __write_lock_debug() can lock up - maybe this can too? */
-static void __read_lock_debug(rwlock_t *lock)
-{
-       u64 i;
-       u64 loops = loops_per_jiffy * HZ;
-       int print_once = 1;
-
-       for (;;) {
-               for (i = 0; i < loops; i++) {
-                       if (arch_read_trylock(&lock->raw_lock))
-                               return;
-                       __delay(1);
-               }
-               /* lockup suspected: */
-               if (print_once) {
-                       print_once = 0;
-                       printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
-                                       "%s/%d, %p\n",
-                               raw_smp_processor_id(), current->comm,
-                               current->pid, lock);
-                       dump_stack();
-               }
-       }
-}
-#endif
-
 void do_raw_read_lock(rwlock_t *lock)
 {
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
@@ -247,32 +197,6 @@ static inline void debug_write_unlock(rwlock_t *lock)
        lock->owner_cpu = -1;
 }
 
-#if 0          /* This can cause lockups */
-static void __write_lock_debug(rwlock_t *lock)
-{
-       u64 i;
-       u64 loops = loops_per_jiffy * HZ;
-       int print_once = 1;
-
-       for (;;) {
-               for (i = 0; i < loops; i++) {
-                       if (arch_write_trylock(&lock->raw_lock))
-                               return;
-                       __delay(1);
-               }
-               /* lockup suspected: */
-               if (print_once) {
-                       print_once = 0;
-                       printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
-                                       "%s/%d, %p\n",
-                               raw_smp_processor_id(), current->comm,
-                               current->pid, lock);
-                       dump_stack();
-               }
-       }
-}
-#endif
-
 void do_raw_write_lock(rwlock_t *lock)
 {
        debug_write_lock_before(lock);
diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
new file mode 100644 (file)
index 0000000..da6c9a3
--- /dev/null
@@ -0,0 +1,646 @@
+/*
+ * Module-based API test facility for ww_mutexes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ */
+
+#include <linux/kernel.h>
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/ww_mutex.h>
+
+static DEFINE_WW_CLASS(ww_class);
+struct workqueue_struct *wq;
+
+struct test_mutex {
+       struct work_struct work;
+       struct ww_mutex mutex;
+       struct completion ready, go, done;
+       unsigned int flags;
+};
+
+#define TEST_MTX_SPIN BIT(0)
+#define TEST_MTX_TRY BIT(1)
+#define TEST_MTX_CTX BIT(2)
+#define __TEST_MTX_LAST BIT(3)
+
+static void test_mutex_work(struct work_struct *work)
+{
+       struct test_mutex *mtx = container_of(work, typeof(*mtx), work);
+
+       complete(&mtx->ready);
+       wait_for_completion(&mtx->go);
+
+       if (mtx->flags & TEST_MTX_TRY) {
+               while (!ww_mutex_trylock(&mtx->mutex))
+                       cpu_relax();
+       } else {
+               ww_mutex_lock(&mtx->mutex, NULL);
+       }
+       complete(&mtx->done);
+       ww_mutex_unlock(&mtx->mutex);
+}
+
+static int __test_mutex(unsigned int flags)
+{
+#define TIMEOUT (HZ / 16)
+       struct test_mutex mtx;
+       struct ww_acquire_ctx ctx;
+       int ret;
+
+       ww_mutex_init(&mtx.mutex, &ww_class);
+       ww_acquire_init(&ctx, &ww_class);
+
+       INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
+       init_completion(&mtx.ready);
+       init_completion(&mtx.go);
+       init_completion(&mtx.done);
+       mtx.flags = flags;
+
+       schedule_work(&mtx.work);
+
+       wait_for_completion(&mtx.ready);
+       ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
+       complete(&mtx.go);
+       if (flags & TEST_MTX_SPIN) {
+               unsigned long timeout = jiffies + TIMEOUT;
+
+               ret = 0;
+               do {
+                       if (completion_done(&mtx.done)) {
+                               ret = -EINVAL;
+                               break;
+                       }
+                       cpu_relax();
+               } while (time_before(jiffies, timeout));
+       } else {
+               ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
+       }
+       ww_mutex_unlock(&mtx.mutex);
+       ww_acquire_fini(&ctx);
+
+       if (ret) {
+               pr_err("%s(flags=%x): mutual exclusion failure\n",
+                      __func__, flags);
+               ret = -EINVAL;
+       }
+
+       flush_work(&mtx.work);
+       destroy_work_on_stack(&mtx.work);
+       return ret;
+#undef TIMEOUT
+}
+
+static int test_mutex(void)
+{
+       int ret;
+       int i;
+
+       for (i = 0; i < __TEST_MTX_LAST; i++) {
+               ret = __test_mutex(i);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static int test_aa(void)
+{
+       struct ww_mutex mutex;
+       struct ww_acquire_ctx ctx;
+       int ret;
+
+       ww_mutex_init(&mutex, &ww_class);
+       ww_acquire_init(&ctx, &ww_class);
+
+       ww_mutex_lock(&mutex, &ctx);
+
+       if (ww_mutex_trylock(&mutex))  {
+               pr_err("%s: trylocked itself!\n", __func__);
+               ww_mutex_unlock(&mutex);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       ret = ww_mutex_lock(&mutex, &ctx);
+       if (ret != -EALREADY) {
+               pr_err("%s: missed deadlock for recursing, ret=%d\n",
+                      __func__, ret);
+               if (!ret)
+                       ww_mutex_unlock(&mutex);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       ret = 0;
+out:
+       ww_mutex_unlock(&mutex);
+       ww_acquire_fini(&ctx);
+       return ret;
+}
+
/*
 * State shared between the two sides of the ABBA deadlock test: the
 * parent locks a_mutex then b_mutex while the worker locks them in the
 * opposite order.  The completions synchronise the two sides so the
 * deadlock is reliably provoked.
 */
struct test_abba {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex b_mutex;
	struct completion a_ready;
	struct completion b_ready;
	bool resolve;	/* back off and retry on -EDEADLK if set */
	int result;	/* worker's final locking status */
};
+
+static void test_abba_work(struct work_struct *work)
+{
+       struct test_abba *abba = container_of(work, typeof(*abba), work);
+       struct ww_acquire_ctx ctx;
+       int err;
+
+       ww_acquire_init(&ctx, &ww_class);
+       ww_mutex_lock(&abba->b_mutex, &ctx);
+
+       complete(&abba->b_ready);
+       wait_for_completion(&abba->a_ready);
+
+       err = ww_mutex_lock(&abba->a_mutex, &ctx);
+       if (abba->resolve && err == -EDEADLK) {
+               ww_mutex_unlock(&abba->b_mutex);
+               ww_mutex_lock_slow(&abba->a_mutex, &ctx);
+               err = ww_mutex_lock(&abba->b_mutex, &ctx);
+       }
+
+       if (!err)
+               ww_mutex_unlock(&abba->a_mutex);
+       ww_mutex_unlock(&abba->b_mutex);
+       ww_acquire_fini(&ctx);
+
+       abba->result = err;
+}
+
+static int test_abba(bool resolve)
+{
+       struct test_abba abba;
+       struct ww_acquire_ctx ctx;
+       int err, ret;
+
+       ww_mutex_init(&abba.a_mutex, &ww_class);
+       ww_mutex_init(&abba.b_mutex, &ww_class);
+       INIT_WORK_ONSTACK(&abba.work, test_abba_work);
+       init_completion(&abba.a_ready);
+       init_completion(&abba.b_ready);
+       abba.resolve = resolve;
+
+       schedule_work(&abba.work);
+
+       ww_acquire_init(&ctx, &ww_class);
+       ww_mutex_lock(&abba.a_mutex, &ctx);
+
+       complete(&abba.a_ready);
+       wait_for_completion(&abba.b_ready);
+
+       err = ww_mutex_lock(&abba.b_mutex, &ctx);
+       if (resolve && err == -EDEADLK) {
+               ww_mutex_unlock(&abba.a_mutex);
+               ww_mutex_lock_slow(&abba.b_mutex, &ctx);
+               err = ww_mutex_lock(&abba.a_mutex, &ctx);
+       }
+
+       if (!err)
+               ww_mutex_unlock(&abba.b_mutex);
+       ww_mutex_unlock(&abba.a_mutex);
+       ww_acquire_fini(&ctx);
+
+       flush_work(&abba.work);
+       destroy_work_on_stack(&abba.work);
+
+       ret = 0;
+       if (resolve) {
+               if (err || abba.result) {
+                       pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
+                              __func__, err, abba.result);
+                       ret = -EINVAL;
+               }
+       } else {
+               if (err != -EDEADLK && abba.result != -EDEADLK) {
+                       pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
+                              __func__, err, abba.result);
+                       ret = -EINVAL;
+               }
+       }
+       return ret;
+}
+
/*
 * One link in the deadlock ring built by __test_cycle(): each worker owns
 * a_mutex and then requests its neighbour's mutex (b_mutex), closing the
 * loop.  a_signal/b_signal sequence the acquisitions so the full cycle is
 * established before anyone blocks.
 */
struct test_cycle {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex *b_mutex;	/* points at the next worker's a_mutex */
	struct completion *a_signal;	/* points at the previous worker's b_signal */
	struct completion b_signal;
	int result;
};
+
+static void test_cycle_work(struct work_struct *work)
+{
+       struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
+       struct ww_acquire_ctx ctx;
+       int err;
+
+       ww_acquire_init(&ctx, &ww_class);
+       ww_mutex_lock(&cycle->a_mutex, &ctx);
+
+       complete(cycle->a_signal);
+       wait_for_completion(&cycle->b_signal);
+
+       err = ww_mutex_lock(cycle->b_mutex, &ctx);
+       if (err == -EDEADLK) {
+               ww_mutex_unlock(&cycle->a_mutex);
+               ww_mutex_lock_slow(cycle->b_mutex, &ctx);
+               err = ww_mutex_lock(&cycle->a_mutex, &ctx);
+       }
+
+       if (!err)
+               ww_mutex_unlock(cycle->b_mutex);
+       ww_mutex_unlock(&cycle->a_mutex);
+       ww_acquire_fini(&ctx);
+
+       cycle->result = err;
+}
+
+static int __test_cycle(unsigned int nthreads)
+{
+       struct test_cycle *cycles;
+       unsigned int n, last = nthreads - 1;
+       int ret;
+
+       cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);
+       if (!cycles)
+               return -ENOMEM;
+
+       for (n = 0; n < nthreads; n++) {
+               struct test_cycle *cycle = &cycles[n];
+
+               ww_mutex_init(&cycle->a_mutex, &ww_class);
+               if (n == last)
+                       cycle->b_mutex = &cycles[0].a_mutex;
+               else
+                       cycle->b_mutex = &cycles[n + 1].a_mutex;
+
+               if (n == 0)
+                       cycle->a_signal = &cycles[last].b_signal;
+               else
+                       cycle->a_signal = &cycles[n - 1].b_signal;
+               init_completion(&cycle->b_signal);
+
+               INIT_WORK(&cycle->work, test_cycle_work);
+               cycle->result = 0;
+       }
+
+       for (n = 0; n < nthreads; n++)
+               queue_work(wq, &cycles[n].work);
+
+       flush_workqueue(wq);
+
+       ret = 0;
+       for (n = 0; n < nthreads; n++) {
+               struct test_cycle *cycle = &cycles[n];
+
+               if (!cycle->result)
+                       continue;
+
+               pr_err("cylic deadlock not resolved, ret[%d/%d] = %d\n",
+                      n, nthreads, cycle->result);
+               ret = -EINVAL;
+               break;
+       }
+
+       for (n = 0; n < nthreads; n++)
+               ww_mutex_destroy(&cycles[n].a_mutex);
+       kfree(cycles);
+       return ret;
+}
+
/* Run the ring test at every size from 2 up to ncpus + 1 workers. */
static int test_cycle(unsigned int ncpus)
{
	unsigned int nthreads;

	for (nthreads = 2; nthreads <= ncpus + 1; nthreads++) {
		int err = __test_cycle(nthreads);

		if (err)
			return err;
	}

	return 0;
}
+
/*
 * Parameters for one stress worker: the shared lock array plus how many
 * locking iterations to run.  Each worker kfree()s its own struct stress
 * when it finishes.
 */
struct stress {
	struct work_struct work;
	struct ww_mutex *locks;
	int nlocks;
	int nloops;
};
+
+static int *get_random_order(int count)
+{
+       int *order;
+       int n, r, tmp;
+
+       order = kmalloc_array(count, sizeof(*order), GFP_TEMPORARY);
+       if (!order)
+               return order;
+
+       for (n = 0; n < count; n++)
+               order[n] = n;
+
+       for (n = count - 1; n > 1; n--) {
+               r = get_random_int() % (n + 1);
+               if (r != n) {
+                       tmp = order[n];
+                       order[n] = order[r];
+                       order[r] = tmp;
+               }
+       }
+
+       return order;
+}
+
/* Hold the acquired lock(s) for a short, scheduler-visible interval. */
static void dummy_load(struct stress *stress)
{
	usleep_range(1000, 2000);
}
+
/*
 * Stress worker: repeatedly acquire all locks in one fixed random order
 * under a single acquire context, honouring the -EDEADLK backoff
 * protocol.
 *
 * @contended tracks the index (into @order) of the lock we backed off on
 * and re-acquired via the slow path, so the retry pass skips it and the
 * unlock pass releases it exactly once.
 */
static void stress_inorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *locks = stress->locks;
	struct ww_acquire_ctx ctx;
	int *order;

	order = get_random_order(nlocks);
	if (!order)
		return;

	ww_acquire_init(&ctx, &ww_class);

	do {
		int contended = -1;
		int n, err;

retry:
		err = 0;
		for (n = 0; n < nlocks; n++) {
			if (n == contended)
				continue;

			err = ww_mutex_lock(&locks[order[n]], &ctx);
			if (err < 0)
				break;
		}
		if (!err)
			dummy_load(stress);

		/*
		 * A slow-path lock from a previous retry that lies beyond
		 * where this pass stopped must be released separately;
		 * everything below n is released by the while loop.
		 */
		if (contended > n)
			ww_mutex_unlock(&locks[order[contended]]);
		contended = n;
		while (n--)
			ww_mutex_unlock(&locks[order[n]]);

		if (err == -EDEADLK) {
			/* back off: take the contended lock on the slow path */
			ww_mutex_lock_slow(&locks[order[contended]], &ctx);
			goto retry;
		}

		if (err) {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}
	} while (--stress->nloops);

	ww_acquire_fini(&ctx);

	kfree(order);
	kfree(stress);
}
+
/* List node pairing a lock with its position in the (re)ordered list. */
struct reorder_lock {
	struct list_head link;
	struct ww_mutex *lock;
};
+
/*
 * Stress worker that changes its locking order on every -EDEADLK: the
 * contended lock is re-acquired on the slow path and moved to the front
 * of the list, restarting the iteration in the new order.
 */
static void stress_reorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	LIST_HEAD(locks);
	struct ww_acquire_ctx ctx;
	struct reorder_lock *ll, *ln;
	int *order;
	int n, err;

	order = get_random_order(stress->nlocks);
	if (!order)
		return;

	/* build a list of the locks in the randomised order */
	for (n = 0; n < stress->nlocks; n++) {
		ll = kmalloc(sizeof(*ll), GFP_KERNEL);
		if (!ll)
			goto out;

		ll->lock = &stress->locks[order[n]];
		list_add(&ll->link, &locks);
	}
	kfree(order);
	order = NULL;

	ww_acquire_init(&ctx, &ww_class);

	do {
		list_for_each_entry(ll, &locks, link) {
			err = ww_mutex_lock(ll->lock, &ctx);
			if (!err)
				continue;

			/* drop everything acquired before the failure */
			ln = ll;
			list_for_each_entry_continue_reverse(ln, &locks, link)
				ww_mutex_unlock(ln->lock);

			if (err != -EDEADLK) {
				pr_err_once("stress (%s) failed with %d\n",
					    __func__, err);
				break;
			}

			/* take the contended lock first on the next pass */
			ww_mutex_lock_slow(ll->lock, &ctx);
			list_move(&ll->link, &locks); /* restarts iteration */
		}

		dummy_load(stress);
		list_for_each_entry(ll, &locks, link)
			ww_mutex_unlock(ll->lock);
	} while (--stress->nloops);

	ww_acquire_fini(&ctx);

out:
	list_for_each_entry_safe(ll, ln, &locks, link)
		kfree(ll);
	kfree(order);
	kfree(stress);
}
+
+static void stress_one_work(struct work_struct *work)
+{
+       struct stress *stress = container_of(work, typeof(*stress), work);
+       const int nlocks = stress->nlocks;
+       struct ww_mutex *lock = stress->locks + (get_random_int() % nlocks);
+       int err;
+
+       do {
+               err = ww_mutex_lock(lock, NULL);
+               if (!err) {
+                       dummy_load(stress);
+                       ww_mutex_unlock(lock);
+               } else {
+                       pr_err_once("stress (%s) failed with %d\n",
+                                   __func__, err);
+                       break;
+               }
+       } while (--stress->nloops);
+
+       kfree(stress);
+}
+
+#define STRESS_INORDER BIT(0)
+#define STRESS_REORDER BIT(1)
+#define STRESS_ONE BIT(2)
+#define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE)
+
+static int stress(int nlocks, int nthreads, int nloops, unsigned int flags)
+{
+       struct ww_mutex *locks;
+       int n;
+
+       locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
+       if (!locks)
+               return -ENOMEM;
+
+       for (n = 0; n < nlocks; n++)
+               ww_mutex_init(&locks[n], &ww_class);
+
+       for (n = 0; nthreads; n++) {
+               struct stress *stress;
+               void (*fn)(struct work_struct *work);
+
+               fn = NULL;
+               switch (n & 3) {
+               case 0:
+                       if (flags & STRESS_INORDER)
+                               fn = stress_inorder_work;
+                       break;
+               case 1:
+                       if (flags & STRESS_REORDER)
+                               fn = stress_reorder_work;
+                       break;
+               case 2:
+                       if (flags & STRESS_ONE)
+                               fn = stress_one_work;
+                       break;
+               }
+
+               if (!fn)
+                       continue;
+
+               stress = kmalloc(sizeof(*stress), GFP_KERNEL);
+               if (!stress)
+                       break;
+
+               INIT_WORK(&stress->work, fn);
+               stress->locks = locks;
+               stress->nlocks = nlocks;
+               stress->nloops = nloops;
+
+               queue_work(wq, &stress->work);
+               nthreads--;
+       }
+
+       flush_workqueue(wq);
+
+       for (n = 0; n < nlocks; n++)
+               ww_mutex_destroy(&locks[n]);
+       kfree(locks);
+
+       return 0;
+}
+
+static int __init test_ww_mutex_init(void)
+{
+       int ncpus = num_online_cpus();
+       int ret;
+
+       wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
+       if (!wq)
+               return -ENOMEM;
+
+       ret = test_mutex();
+       if (ret)
+               return ret;
+
+       ret = test_aa();
+       if (ret)
+               return ret;
+
+       ret = test_abba(false);
+       if (ret)
+               return ret;
+
+       ret = test_abba(true);
+       if (ret)
+               return ret;
+
+       ret = test_cycle(ncpus);
+       if (ret)
+               return ret;
+
+       ret = stress(16, 2*ncpus, 1<<10, STRESS_INORDER);
+       if (ret)
+               return ret;
+
+       ret = stress(16, 2*ncpus, 1<<10, STRESS_REORDER);
+       if (ret)
+               return ret;
+
+       ret = stress(4096, hweight32(STRESS_ALL)*ncpus, 1<<12, STRESS_ALL);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
/* Module teardown: all workers have finished, just drop the workqueue. */
static void __exit test_ww_mutex_exit(void)
{
	destroy_workqueue(wq);
}

module_init(test_ww_mutex_init);
module_exit(test_ww_mutex_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
index 536c727a56e975e2e310fb3fb82474f07a1b916f..9f9284f37f8d8b6d3149ecc22634c613123e792d 100644 (file)
@@ -16,6 +16,7 @@
 
 #include <linux/syscalls.h>
 #include <linux/membarrier.h>
+#include <linux/tick.h>
 
 /*
  * Bitmask made from a "or" of all commands within enum membarrier_cmd,
@@ -51,6 +52,9 @@
  */
 SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
 {
+       /* MEMBARRIER_CMD_SHARED is not compatible with nohz_full. */
+       if (tick_nohz_full_enabled())
+               return -ENOSYS;
        if (unlikely(flags))
                return -EINVAL;
        switch (cmd) {
index 38d4270925d4d13619d725052aa3f9844f23bc96..3d8f126208e3ae04eeff3fd1b1e00044c0e3d0d2 100644 (file)
@@ -389,16 +389,16 @@ extern const struct kernel_symbol __start___ksymtab_gpl[];
 extern const struct kernel_symbol __stop___ksymtab_gpl[];
 extern const struct kernel_symbol __start___ksymtab_gpl_future[];
 extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
-extern const unsigned long __start___kcrctab[];
-extern const unsigned long __start___kcrctab_gpl[];
-extern const unsigned long __start___kcrctab_gpl_future[];
+extern const s32 __start___kcrctab[];
+extern const s32 __start___kcrctab_gpl[];
+extern const s32 __start___kcrctab_gpl_future[];
 #ifdef CONFIG_UNUSED_SYMBOLS
 extern const struct kernel_symbol __start___ksymtab_unused[];
 extern const struct kernel_symbol __stop___ksymtab_unused[];
 extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
 extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
-extern const unsigned long __start___kcrctab_unused[];
-extern const unsigned long __start___kcrctab_unused_gpl[];
+extern const s32 __start___kcrctab_unused[];
+extern const s32 __start___kcrctab_unused_gpl[];
 #endif
 
 #ifndef CONFIG_MODVERSIONS
@@ -497,7 +497,7 @@ struct find_symbol_arg {
 
        /* Output */
        struct module *owner;
-       const unsigned long *crc;
+       const s32 *crc;
        const struct kernel_symbol *sym;
 };
 
@@ -563,7 +563,7 @@ static bool find_symbol_in_section(const struct symsearch *syms,
  * (optional) module which owns it.  Needs preempt disabled or module_mutex. */
 const struct kernel_symbol *find_symbol(const char *name,
                                        struct module **owner,
-                                       const unsigned long **crc,
+                                       const s32 **crc,
                                        bool gplok,
                                        bool warn)
 {
@@ -1249,23 +1249,17 @@ static int try_to_force_load(struct module *mod, const char *reason)
 }
 
 #ifdef CONFIG_MODVERSIONS
-/* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
-static unsigned long maybe_relocated(unsigned long crc,
-                                    const struct module *crc_owner)
+
+static u32 resolve_rel_crc(const s32 *crc)
 {
-#ifdef ARCH_RELOCATES_KCRCTAB
-       if (crc_owner == NULL)
-               return crc - (unsigned long)reloc_start;
-#endif
-       return crc;
+       return *(u32 *)((void *)crc + *crc);
 }
 
 static int check_version(Elf_Shdr *sechdrs,
                         unsigned int versindex,
                         const char *symname,
                         struct module *mod,
-                        const unsigned long *crc,
-                        const struct module *crc_owner)
+                        const s32 *crc)
 {
        unsigned int i, num_versions;
        struct modversion_info *versions;
@@ -1283,13 +1277,19 @@ static int check_version(Elf_Shdr *sechdrs,
                / sizeof(struct modversion_info);
 
        for (i = 0; i < num_versions; i++) {
+               u32 crcval;
+
                if (strcmp(versions[i].name, symname) != 0)
                        continue;
 
-               if (versions[i].crc == maybe_relocated(*crc, crc_owner))
+               if (IS_ENABLED(CONFIG_MODULE_REL_CRCS))
+                       crcval = resolve_rel_crc(crc);
+               else
+                       crcval = *crc;
+               if (versions[i].crc == crcval)
                        return 1;
-               pr_debug("Found checksum %lX vs module %lX\n",
-                      maybe_relocated(*crc, crc_owner), versions[i].crc);
+               pr_debug("Found checksum %X vs module %lX\n",
+                        crcval, versions[i].crc);
                goto bad_version;
        }
 
@@ -1307,7 +1307,7 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
                                          unsigned int versindex,
                                          struct module *mod)
 {
-       const unsigned long *crc;
+       const s32 *crc;
 
        /*
         * Since this should be found in kernel (which can't be removed), no
@@ -1321,8 +1321,7 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
        }
        preempt_enable();
        return check_version(sechdrs, versindex,
-                            VMLINUX_SYMBOL_STR(module_layout), mod, crc,
-                            NULL);
+                            VMLINUX_SYMBOL_STR(module_layout), mod, crc);
 }
 
 /* First part is kernel version, which we ignore if module has crcs. */
@@ -1340,8 +1339,7 @@ static inline int check_version(Elf_Shdr *sechdrs,
                                unsigned int versindex,
                                const char *symname,
                                struct module *mod,
-                               const unsigned long *crc,
-                               const struct module *crc_owner)
+                               const s32 *crc)
 {
        return 1;
 }
@@ -1368,7 +1366,7 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod,
 {
        struct module *owner;
        const struct kernel_symbol *sym;
-       const unsigned long *crc;
+       const s32 *crc;
        int err;
 
        /*
@@ -1383,8 +1381,7 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod,
        if (!sym)
                goto unlock;
 
-       if (!check_version(info->sechdrs, info->index.vers, name, mod, crc,
-                          owner)) {
+       if (!check_version(info->sechdrs, info->index.vers, name, mod, crc)) {
                sym = ERR_PTR(-EINVAL);
                goto getname;
        }
index 901c4fb46002e38c98394110c49aa50230d58180..08aa88dde7de806d4cb2b14fd93e87be8dd94501 100644 (file)
@@ -249,7 +249,7 @@ void panic(const char *fmt, ...)
                 * Delay timeout seconds before rebooting the machine.
                 * We can't use the "normal" timers since we just panicked.
                 */
-               pr_emerg("Rebooting in %d seconds..", panic_timeout);
+               pr_emerg("Rebooting in %d seconds..\n", panic_timeout);
 
                for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
                        touch_nmi_watchdog();
index f66162f2359bfb2e85777ce4c5faed637a15db60..0291804151b587e880ed6e2072bc4217c4694db5 100644 (file)
@@ -68,9 +68,7 @@ static inline int mk_pid(struct pid_namespace *pid_ns,
  * the scheme scales to up to 4 million PIDs, runtime.
  */
 struct pid_namespace init_pid_ns = {
-       .kref = {
-               .refcount       = ATOMIC_INIT(2),
-       },
+       .kref = KREF_INIT(2),
        .pidmap = {
                [ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
        },
index f67ceb7768b82ac4e183b22042f1f0784011262b..15e6baef5c73f90b6817c0b1c4e871ea40e30318 100644 (file)
@@ -46,7 +46,7 @@ static const char * const mem_sleep_labels[] = {
 const char *mem_sleep_states[PM_SUSPEND_MAX];
 
 suspend_state_t mem_sleep_current = PM_SUSPEND_FREEZE;
-suspend_state_t mem_sleep_default = PM_SUSPEND_MAX;
+static suspend_state_t mem_sleep_default = PM_SUSPEND_MEM;
 
 unsigned int pm_suspend_global_flags;
 EXPORT_SYMBOL_GPL(pm_suspend_global_flags);
@@ -168,7 +168,7 @@ void suspend_set_ops(const struct platform_suspend_ops *ops)
        }
        if (valid_state(PM_SUSPEND_MEM)) {
                mem_sleep_states[PM_SUSPEND_MEM] = mem_sleep_labels[PM_SUSPEND_MEM];
-               if (mem_sleep_default >= PM_SUSPEND_MEM)
+               if (mem_sleep_default == PM_SUSPEND_MEM)
                        mem_sleep_current = PM_SUSPEND_MEM;
        }
 
index bdff5ed57f10a5ef57a015856830471422f3918a..5db217051232de97afcc20208ee504ae3d06a430 100644 (file)
@@ -166,7 +166,7 @@ static int __init setup_test_suspend(char *value)
                        return 0;
        }
 
-       for (i = 0; pm_labels[i]; i++)
+       for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
                if (!strcmp(pm_labels[i], suspend_type)) {
                        test_state_label = pm_labels[i];
                        return 0;
index 32e0c232efbafa4a3c8f6f40468c61eaad937d5b..f80fd33639e0e5f5b4fc7b1fb53ecfd87b1eaa4d 100644 (file)
@@ -201,7 +201,7 @@ void free_all_swap_pages(int swap)
                struct swsusp_extent *ext;
                unsigned long offset;
 
-               ext = container_of(node, struct swsusp_extent, node);
+               ext = rb_entry(node, struct swsusp_extent, node);
                rb_erase(node, &swsusp_extents);
                for (offset = ext->start; offset <= ext->end; offset++)
                        swap_free(swp_entry(swap, offset));
index 8b2696420abb5de7347429f52d9a3d46d4dbcd24..4ba3d34938c033740b19ad27c74c01a8cdaef2b4 100644 (file)
@@ -1516,7 +1516,7 @@ static void call_console_drivers(int level,
 {
        struct console *con;
 
-       trace_console(text, len);
+       trace_console_rcuidle(text, len);
 
        if (!console_drivers)
                return;
index 87c51225ceeca991bf64505bac8eedf3e23e44cf..d81345be730ea5134e38acb5e85b93c1c3882b39 100644 (file)
@@ -564,10 +564,25 @@ static void srcu_torture_stats(void)
        pr_alert("%s%s per-CPU(idx=%d):",
                 torture_type, TORTURE_FLAG, idx);
        for_each_possible_cpu(cpu) {
+               unsigned long l0, l1;
+               unsigned long u0, u1;
                long c0, c1;
+               struct srcu_array *counts = per_cpu_ptr(srcu_ctlp->per_cpu_ref, cpu);
 
-               c0 = (long)per_cpu_ptr(srcu_ctlp->per_cpu_ref, cpu)->c[!idx];
-               c1 = (long)per_cpu_ptr(srcu_ctlp->per_cpu_ref, cpu)->c[idx];
+               u0 = counts->unlock_count[!idx];
+               u1 = counts->unlock_count[idx];
+
+               /*
+                * Make sure that a lock is always counted if the corresponding
+                * unlock is counted.
+                */
+               smp_rmb();
+
+               l0 = counts->lock_count[!idx];
+               l1 = counts->lock_count[idx];
+
+               c0 = l0 - u0;
+               c1 = l1 - u1;
                pr_cont(" %d(%ld,%ld)", cpu, c0, c1);
        }
        pr_cont("\n");
index 9b9cdd549caa848111247b35b19109da7af9099d..e773129c8b08d29d0ce81cbdbc195bed905b2562 100644 (file)
@@ -106,7 +106,7 @@ static int init_srcu_struct_fields(struct srcu_struct *sp)
        rcu_batch_init(&sp->batch_check1);
        rcu_batch_init(&sp->batch_done);
        INIT_DELAYED_WORK(&sp->work, process_srcu);
-       sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
+       sp->per_cpu_ref = alloc_percpu(struct srcu_array);
        return sp->per_cpu_ref ? 0 : -ENOMEM;
 }
 
@@ -141,114 +141,77 @@ EXPORT_SYMBOL_GPL(init_srcu_struct);
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
 /*
- * Returns approximate total of the readers' ->seq[] values for the
+ * Returns approximate total of the readers' ->lock_count[] values for the
  * rank of per-CPU counters specified by idx.
  */
-static unsigned long srcu_readers_seq_idx(struct srcu_struct *sp, int idx)
+static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
 {
        int cpu;
        unsigned long sum = 0;
-       unsigned long t;
 
        for_each_possible_cpu(cpu) {
-               t = READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->seq[idx]);
-               sum += t;
+               struct srcu_array *cpuc = per_cpu_ptr(sp->per_cpu_ref, cpu);
+
+               sum += READ_ONCE(cpuc->lock_count[idx]);
        }
        return sum;
 }
 
 /*
- * Returns approximate number of readers active on the specified rank
- * of the per-CPU ->c[] counters.
+ * Returns approximate total of the readers' ->unlock_count[] values for the
+ * rank of per-CPU counters specified by idx.
  */
-static unsigned long srcu_readers_active_idx(struct srcu_struct *sp, int idx)
+static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
 {
        int cpu;
        unsigned long sum = 0;
-       unsigned long t;
 
        for_each_possible_cpu(cpu) {
-               t = READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]);
-               sum += t;
+               struct srcu_array *cpuc = per_cpu_ptr(sp->per_cpu_ref, cpu);
+
+               sum += READ_ONCE(cpuc->unlock_count[idx]);
        }
        return sum;
 }
 
 /*
  * Return true if the number of pre-existing readers is determined to
- * be stably zero.  An example unstable zero can occur if the call
- * to srcu_readers_active_idx() misses an __srcu_read_lock() increment,
- * but due to task migration, sees the corresponding __srcu_read_unlock()
- * decrement.  This can happen because srcu_readers_active_idx() takes
- * time to sum the array, and might in fact be interrupted or preempted
- * partway through the summation.
+ * be zero.
  */
 static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
 {
-       unsigned long seq;
+       unsigned long unlocks;
 
-       seq = srcu_readers_seq_idx(sp, idx);
+       unlocks = srcu_readers_unlock_idx(sp, idx);
 
        /*
-        * The following smp_mb() A pairs with the smp_mb() B located in
-        * __srcu_read_lock().  This pairing ensures that if an
-        * __srcu_read_lock() increments its counter after the summation
-        * in srcu_readers_active_idx(), then the corresponding SRCU read-side
-        * critical section will see any changes made prior to the start
-        * of the current SRCU grace period.
+        * Make sure that a lock is always counted if the corresponding unlock
+        * is counted. Needs to be a smp_mb() as the read side may contain a
+        * read from a variable that is written to before the synchronize_srcu()
+        * in the write side. In this case smp_mb()s A and B act like the store
+        * buffering pattern.
         *
-        * Also, if the above call to srcu_readers_seq_idx() saw the
-        * increment of ->seq[], then the call to srcu_readers_active_idx()
-        * must see the increment of ->c[].
+        * This smp_mb() also pairs with smp_mb() C to prevent accesses after the
+        * synchronize_srcu() from being executed before the grace period ends.
         */
        smp_mb(); /* A */
 
        /*
-        * Note that srcu_readers_active_idx() can incorrectly return
-        * zero even though there is a pre-existing reader throughout.
-        * To see this, suppose that task A is in a very long SRCU
-        * read-side critical section that started on CPU 0, and that
-        * no other reader exists, so that the sum of the counters
-        * is equal to one.  Then suppose that task B starts executing
-        * srcu_readers_active_idx(), summing up to CPU 1, and then that
-        * task C starts reading on CPU 0, so that its increment is not
-        * summed, but finishes reading on CPU 2, so that its decrement
-        * -is- summed.  Then when task B completes its sum, it will
-        * incorrectly get zero, despite the fact that task A has been
-        * in its SRCU read-side critical section the whole time.
-        *
-        * We therefore do a validation step should srcu_readers_active_idx()
-        * return zero.
-        */
-       if (srcu_readers_active_idx(sp, idx) != 0)
-               return false;
-
-       /*
-        * The remainder of this function is the validation step.
-        * The following smp_mb() D pairs with the smp_mb() C in
-        * __srcu_read_unlock().  If the __srcu_read_unlock() was seen
-        * by srcu_readers_active_idx() above, then any destructive
-        * operation performed after the grace period will happen after
-        * the corresponding SRCU read-side critical section.
+        * If the locks are the same as the unlocks, then there must have
+        * been no readers on this index at some time in between. This does not
+        * mean that there are no more readers, as one could have read the
+        * current index but not have incremented the lock counter yet.
         *
-        * Note that there can be at most NR_CPUS worth of readers using
-        * the old index, which is not enough to overflow even a 32-bit
-        * integer.  (Yes, this does mean that systems having more than
-        * a billion or so CPUs need to be 64-bit systems.)  Therefore,
-        * the sum of the ->seq[] counters cannot possibly overflow.
-        * Therefore, the only way that the return values of the two
-        * calls to srcu_readers_seq_idx() can be equal is if there were
-        * no increments of the corresponding rank of ->seq[] counts
-        * in the interim.  But the missed-increment scenario laid out
-        * above includes an increment of the ->seq[] counter by
-        * the corresponding __srcu_read_lock().  Therefore, if this
-        * scenario occurs, the return values from the two calls to
-        * srcu_readers_seq_idx() will differ, and thus the validation
-        * step below suffices.
+        * Possible bug: There is no guarantee that there haven't been ULONG_MAX
+        * increments of ->lock_count[] since the unlocks were counted, meaning
+        * that this could return true even if there are still active readers.
+        * Since there are no memory barriers around srcu_flip(), the CPU is not
+        * required to increment ->completed before running
+        * srcu_readers_unlock_idx(), which means that there could be an
+        * arbitrarily large number of critical sections that execute after
+        * srcu_readers_unlock_idx() but use the old value of ->completed.
         */
-       smp_mb(); /* D */
-
-       return srcu_readers_seq_idx(sp, idx) == seq;
+       return srcu_readers_lock_idx(sp, idx) == unlocks;
 }
 
 /**
@@ -266,8 +229,12 @@ static bool srcu_readers_active(struct srcu_struct *sp)
        unsigned long sum = 0;
 
        for_each_possible_cpu(cpu) {
-               sum += READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[0]);
-               sum += READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[1]);
+               struct srcu_array *cpuc = per_cpu_ptr(sp->per_cpu_ref, cpu);
+
+               sum += READ_ONCE(cpuc->lock_count[0]);
+               sum += READ_ONCE(cpuc->lock_count[1]);
+               sum -= READ_ONCE(cpuc->unlock_count[0]);
+               sum -= READ_ONCE(cpuc->unlock_count[1]);
        }
        return sum;
 }
@@ -298,9 +265,8 @@ int __srcu_read_lock(struct srcu_struct *sp)
        int idx;
 
        idx = READ_ONCE(sp->completed) & 0x1;
-       __this_cpu_inc(sp->per_cpu_ref->c[idx]);
+       __this_cpu_inc(sp->per_cpu_ref->lock_count[idx]);
        smp_mb(); /* B */  /* Avoid leaking the critical section. */
-       __this_cpu_inc(sp->per_cpu_ref->seq[idx]);
        return idx;
 }
 EXPORT_SYMBOL_GPL(__srcu_read_lock);
@@ -314,7 +280,7 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
 void __srcu_read_unlock(struct srcu_struct *sp, int idx)
 {
        smp_mb(); /* C */  /* Avoid leaking the critical section. */
-       this_cpu_dec(sp->per_cpu_ref->c[idx]);
+       this_cpu_inc(sp->per_cpu_ref->unlock_count[idx]);
 }
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 
@@ -349,12 +315,21 @@ static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
 
 /*
  * Increment the ->completed counter so that future SRCU readers will
- * use the other rank of the ->c[] and ->seq[] arrays.  This allows
+ * use the other rank of the ->(un)lock_count[] arrays.  This allows
  * us to wait for pre-existing readers in a starvation-free manner.
  */
 static void srcu_flip(struct srcu_struct *sp)
 {
-       sp->completed++;
+       WRITE_ONCE(sp->completed, sp->completed + 1);
+
+       /*
+        * Ensure that if the updater misses an __srcu_read_unlock()
+        * increment, that task's next __srcu_read_lock() will see the
+        * above counter update.  Note that both this memory barrier
+        * and the one in srcu_readers_active_idx_check() provide the
+        * guarantee for __srcu_read_lock().
+        */
+       smp_mb(); /* D */  /* Pairs with C. */
 }
 
 /*
@@ -392,6 +367,7 @@ void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
        head->next = NULL;
        head->func = func;
        spin_lock_irqsave(&sp->queue_lock, flags);
+       smp_mb__after_unlock_lock(); /* Caller's prior accesses before GP. */
        rcu_batch_queue(&sp->batch_queue, head);
        if (!sp->running) {
                sp->running = true;
@@ -425,6 +401,7 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
        head->next = NULL;
        head->func = wakeme_after_rcu;
        spin_lock_irq(&sp->queue_lock);
+       smp_mb__after_unlock_lock(); /* Caller's prior accesses before GP. */
        if (!sp->running) {
                /* steal the processing owner */
                sp->running = true;
@@ -444,8 +421,11 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
                spin_unlock_irq(&sp->queue_lock);
        }
 
-       if (!done)
+       if (!done) {
                wait_for_completion(&rcu.completion);
+               smp_mb(); /* Caller's later accesses after GP. */
+       }
+
 }
 
 /**
@@ -613,7 +593,8 @@ static void srcu_advance_batches(struct srcu_struct *sp, int trycount)
 /*
  * Invoke a limited number of SRCU callbacks that have passed through
  * their grace period.  If there are more to do, SRCU will reschedule
- * the workqueue.
+ * the workqueue.  Note that needed memory barriers have been executed
+ * in this task's context by srcu_readers_active_idx_check().
  */
 static void srcu_invoke_callbacks(struct srcu_struct *sp)
 {
index b23a4d076f3d2c64862172c83c18f21605e87159..fa6a48d3917bf26ce12f18aa2c94de0c65854323 100644 (file)
@@ -41,8 +41,6 @@
 
 /* Forward declarations for tiny_plugin.h. */
 struct rcu_ctrlblk;
-static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
-static void rcu_process_callbacks(struct softirq_action *unused);
 static void __call_rcu(struct rcu_head *head,
                       rcu_callback_t func,
                       struct rcu_ctrlblk *rcp);
index cb4e2056ccf3cf799bb7c045aca346fedb2ed698..d80e0d2f68c675de3d95a89ad22a09465a2a995b 100644 (file)
@@ -281,6 +281,116 @@ static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 #endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 };
 
+/*
+ * Record entry into an extended quiescent state.  This is only to be
+ * called when not already in an extended quiescent state.
+ */
+static void rcu_dynticks_eqs_enter(void)
+{
+       struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+       int special;
+
+       /*
+        * CPUs seeing atomic_inc_return() must see prior RCU read-side
+        * critical sections, and we also must force ordering with the
+        * next idle sojourn.
+        */
+       special = atomic_inc_return(&rdtp->dynticks);
+       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && special & 0x1);
+}
+
+/*
+ * Record exit from an extended quiescent state.  This is only to be
+ * called from an extended quiescent state.
+ */
+static void rcu_dynticks_eqs_exit(void)
+{
+       struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+       int special;
+
+       /*
+        * CPUs seeing atomic_inc_return() must see prior idle sojourns,
+        * and we also must force ordering with the next RCU read-side
+        * critical section.
+        */
+       special = atomic_inc_return(&rdtp->dynticks);
+       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(special & 0x1));
+}
+
+/*
+ * Reset the current CPU's ->dynticks counter to indicate that the
+ * newly onlined CPU is no longer in an extended quiescent state.
+ * This will either leave the counter unchanged, or increment it
+ * to the next non-quiescent value.
+ *
+ * The non-atomic test/increment sequence works because the upper bits
+ * of the ->dynticks counter are manipulated only by the corresponding CPU,
+ * or when the corresponding CPU is offline.
+ */
+static void rcu_dynticks_eqs_online(void)
+{
+       struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+
+       if (atomic_read(&rdtp->dynticks) & 0x1)
+               return;
+       atomic_add(0x1, &rdtp->dynticks);
+}
+
+/*
+ * Is the current CPU in an extended quiescent state?
+ *
+ * No ordering, as we are sampling CPU-local information.
+ */
+bool rcu_dynticks_curr_cpu_in_eqs(void)
+{
+       struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+
+       return !(atomic_read(&rdtp->dynticks) & 0x1);
+}
+
+/*
+ * Snapshot the ->dynticks counter with full ordering so as to allow
+ * stable comparison of this counter with past and future snapshots.
+ */
+int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
+{
+       int snap = atomic_add_return(0, &rdtp->dynticks);
+
+       return snap;
+}
+
+/*
+ * Return true if the snapshot returned from rcu_dynticks_snap()
+ * indicates that RCU is in an extended quiescent state.
+ */
+static bool rcu_dynticks_in_eqs(int snap)
+{
+       return !(snap & 0x1);
+}
+
+/*
+ * Return true if the CPU corresponding to the specified rcu_dynticks
+ * structure has spent some time in an extended quiescent state since
+ * rcu_dynticks_snap() returned the specified snapshot.
+ */
+static bool rcu_dynticks_in_eqs_since(struct rcu_dynticks *rdtp, int snap)
+{
+       return snap != rcu_dynticks_snap(rdtp);
+}
+
+/*
+ * Do a double-increment of the ->dynticks counter to emulate a
+ * momentary idle-CPU quiescent state.
+ */
+static void rcu_dynticks_momentary_idle(void)
+{
+       struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+       int special = atomic_add_return(2, &rdtp->dynticks);
+
+       /* It is illegal to call this from idle state. */
+       WARN_ON_ONCE(!(special & 0x1));
+}
+
 DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);
 EXPORT_PER_CPU_SYMBOL_GPL(rcu_qs_ctr);
 
@@ -300,7 +410,6 @@ EXPORT_PER_CPU_SYMBOL_GPL(rcu_qs_ctr);
 static void rcu_momentary_dyntick_idle(void)
 {
        struct rcu_data *rdp;
-       struct rcu_dynticks *rdtp;
        int resched_mask;
        struct rcu_state *rsp;
 
@@ -327,10 +436,7 @@ static void rcu_momentary_dyntick_idle(void)
                 * quiescent state, with no need for this CPU to do anything
                 * further.
                 */
-               rdtp = this_cpu_ptr(&rcu_dynticks);
-               smp_mb__before_atomic(); /* Earlier stuff before QS. */
-               atomic_add(2, &rdtp->dynticks);  /* QS. */
-               smp_mb__after_atomic(); /* Later stuff after QS. */
+               rcu_dynticks_momentary_idle();
                break;
        }
 }
@@ -611,7 +717,7 @@ static int
 cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
 {
        return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL] &&
-              rdp->nxttail[RCU_DONE_TAIL] != NULL;
+              rdp->nxttail[RCU_NEXT_TAIL] != NULL;
 }
 
 /*
@@ -673,7 +779,7 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
 {
        struct rcu_state *rsp;
        struct rcu_data *rdp;
-       struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+       RCU_TRACE(struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);)
 
        trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
        if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
@@ -692,12 +798,7 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
                do_nocb_deferred_wakeup(rdp);
        }
        rcu_prepare_for_idle();
-       /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
-       smp_mb__before_atomic();  /* See above. */
-       atomic_inc(&rdtp->dynticks);
-       smp_mb__after_atomic();  /* Force ordering with next sojourn. */
-       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-                    atomic_read(&rdtp->dynticks) & 0x1);
+       rcu_dynticks_eqs_enter();
        rcu_dynticks_task_enter();
 
        /*
@@ -826,15 +927,10 @@ void rcu_irq_exit_irqson(void)
  */
 static void rcu_eqs_exit_common(long long oldval, int user)
 {
-       struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+       RCU_TRACE(struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);)
 
        rcu_dynticks_task_exit();
-       smp_mb__before_atomic();  /* Force ordering w/previous sojourn. */
-       atomic_inc(&rdtp->dynticks);
-       /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
-       smp_mb__after_atomic();  /* See above. */
-       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-                    !(atomic_read(&rdtp->dynticks) & 0x1));
+       rcu_dynticks_eqs_exit();
        rcu_cleanup_after_idle();
        trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
        if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
@@ -980,12 +1076,8 @@ void rcu_nmi_enter(void)
         * to be in the outermost NMI handler that interrupted an RCU-idle
         * period (observation due to Andy Lutomirski).
         */
-       if (!(atomic_read(&rdtp->dynticks) & 0x1)) {
-               smp_mb__before_atomic();  /* Force delay from prior write. */
-               atomic_inc(&rdtp->dynticks);
-               /* atomic_inc() before later RCU read-side crit sects */
-               smp_mb__after_atomic();  /* See above. */
-               WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+       if (rcu_dynticks_curr_cpu_in_eqs()) {
+               rcu_dynticks_eqs_exit();
                incby = 1;
        }
        rdtp->dynticks_nmi_nesting += incby;
@@ -1010,7 +1102,7 @@ void rcu_nmi_exit(void)
         * to us!)
         */
        WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
-       WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+       WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());
 
        /*
         * If the nesting level is not 1, the CPU wasn't RCU-idle, so
@@ -1023,11 +1115,7 @@ void rcu_nmi_exit(void)
 
        /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
        rdtp->dynticks_nmi_nesting = 0;
-       /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
-       smp_mb__before_atomic();  /* See above. */
-       atomic_inc(&rdtp->dynticks);
-       smp_mb__after_atomic();  /* Force delay to next write. */
-       WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
+       rcu_dynticks_eqs_enter();
 }
 
 /**
@@ -1040,7 +1128,7 @@ void rcu_nmi_exit(void)
  */
 bool notrace __rcu_is_watching(void)
 {
-       return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
+       return !rcu_dynticks_curr_cpu_in_eqs();
 }
 
 /**
@@ -1123,9 +1211,9 @@ static int rcu_is_cpu_rrupt_from_idle(void)
 static int dyntick_save_progress_counter(struct rcu_data *rdp,
                                         bool *isidle, unsigned long *maxj)
 {
-       rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
+       rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
        rcu_sysidle_check_cpu(rdp, isidle, maxj);
-       if ((rdp->dynticks_snap & 0x1) == 0) {
+       if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
                trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
                if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4,
                                 rdp->mynode->gpnum))
@@ -1144,12 +1232,10 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
                                    bool *isidle, unsigned long *maxj)
 {
-       unsigned int curr;
+       unsigned long jtsq;
        int *rcrmp;
-       unsigned int snap;
-
-       curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
-       snap = (unsigned int)rdp->dynticks_snap;
+       unsigned long rjtsc;
+       struct rcu_node *rnp;
 
        /*
         * If the CPU passed through or entered a dynticks idle phase with
@@ -1159,27 +1245,39 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
         * read-side critical section that started before the beginning
         * of the current RCU grace period.
         */
-       if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) {
+       if (rcu_dynticks_in_eqs_since(rdp->dynticks, rdp->dynticks_snap)) {
                trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
                rdp->dynticks_fqs++;
                return 1;
        }
 
+       /* Compute and saturate jiffies_till_sched_qs. */
+       jtsq = jiffies_till_sched_qs;
+       rjtsc = rcu_jiffies_till_stall_check();
+       if (jtsq > rjtsc / 2) {
+               WRITE_ONCE(jiffies_till_sched_qs, rjtsc);
+               jtsq = rjtsc / 2;
+       } else if (jtsq < 1) {
+               WRITE_ONCE(jiffies_till_sched_qs, 1);
+               jtsq = 1;
+       }
+
        /*
-        * Check for the CPU being offline, but only if the grace period
-        * is old enough.  We don't need to worry about the CPU changing
-        * state: If we see it offline even once, it has been through a
-        * quiescent state.
-        *
-        * The reason for insisting that the grace period be at least
-        * one jiffy old is that CPUs that are not quite online and that
-        * have just gone offline can still execute RCU read-side critical
-        * sections.
+        * Has this CPU encountered a cond_resched_rcu_qs() since the
+        * beginning of the grace period?  For this to be the case,
+        * the CPU has to have noticed the current grace period.  This
+        * might not be the case for nohz_full CPUs looping in the kernel.
         */
-       if (ULONG_CMP_GE(rdp->rsp->gp_start + 2, jiffies))
-               return 0;  /* Grace period is not old enough. */
-       barrier();
-       if (cpu_is_offline(rdp->cpu)) {
+       rnp = rdp->mynode;
+       if (time_after(jiffies, rdp->rsp->gp_start + jtsq) &&
+           READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_qs_ctr, rdp->cpu) &&
+           READ_ONCE(rdp->gpnum) == rnp->gpnum && !rdp->gpwrap) {
+               trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("rqc"));
+               return 1;
+       }
+
+       /* Check for the CPU being offline. */
+       if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp))) {
                trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
                rdp->offline_fqs++;
                return 1;
@@ -1207,9 +1305,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
         * warning delay.
         */
        rcrmp = &per_cpu(rcu_sched_qs_mask, rdp->cpu);
-       if (ULONG_CMP_GE(jiffies,
-                        rdp->rsp->gp_start + jiffies_till_sched_qs) ||
-           ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
+       if (time_after(jiffies, rdp->rsp->gp_start + jtsq) ||
+           time_after(jiffies, rdp->rsp->jiffies_resched)) {
                if (!(READ_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
                        WRITE_ONCE(rdp->cond_resched_completed,
                                   READ_ONCE(rdp->mynode->completed));
@@ -1220,11 +1317,12 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
                rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
        }
 
-       /* And if it has been a really long time, kick the CPU as well. */
-       if (ULONG_CMP_GE(jiffies,
-                        rdp->rsp->gp_start + 2 * jiffies_till_sched_qs) ||
-           ULONG_CMP_GE(jiffies, rdp->rsp->gp_start + jiffies_till_sched_qs))
-               resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
+       /*
+        * If more than halfway to RCU CPU stall-warning time, do
+        * a resched_cpu() to try to loosen things up a bit.
+        */
+       if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2)
+               resched_cpu(rdp->cpu);
 
        return 0;
 }
@@ -1277,7 +1375,10 @@ static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
 }
 
 /*
- * Dump stacks of all tasks running on stalled CPUs.
+ * Dump stacks of all tasks running on stalled CPUs.  First try using
+ * NMIs, but fall back to manual remote stack tracing on architectures
+ * that don't support NMI-based stack dumps.  The NMI-triggered stack
+ * traces are more accurate because they are printed by the target CPU.
  */
 static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
 {
@@ -1287,11 +1388,10 @@ static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
 
        rcu_for_each_leaf_node(rsp, rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
-               if (rnp->qsmask != 0) {
-                       for_each_leaf_node_possible_cpu(rnp, cpu)
-                               if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
+               for_each_leaf_node_possible_cpu(rnp, cpu)
+                       if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
+                               if (!trigger_single_cpu_backtrace(cpu))
                                        dump_cpu_task(cpu);
-               }
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
 }
@@ -1379,6 +1479,9 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
               (long)rsp->gpnum, (long)rsp->completed, totqlen);
        if (ndetected) {
                rcu_dump_cpu_stacks(rsp);
+
+               /* Complain about tasks blocking the grace period. */
+               rcu_print_detail_task_stall(rsp);
        } else {
                if (READ_ONCE(rsp->gpnum) != gpnum ||
                    READ_ONCE(rsp->completed) == gpnum) {
@@ -1395,9 +1498,6 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
                }
        }
 
-       /* Complain about tasks blocking the grace period. */
-       rcu_print_detail_task_stall(rsp);
-
        rcu_check_gp_kthread_starvation(rsp);
 
        panic_on_rcu_stall();
@@ -2467,10 +2567,8 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
 
        rnp = rdp->mynode;
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
-       if ((rdp->cpu_no_qs.b.norm &&
-            rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) ||
-           rdp->gpnum != rnp->gpnum || rnp->completed == rnp->gpnum ||
-           rdp->gpwrap) {
+       if (rdp->cpu_no_qs.b.norm || rdp->gpnum != rnp->gpnum ||
+           rnp->completed == rnp->gpnum || rdp->gpwrap) {
 
                /*
                 * The grace period in which this quiescent state was
@@ -2525,8 +2623,7 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
         * Was there a quiescent state since the beginning of the grace
         * period? If no, then exit and wait for the next call.
         */
-       if (rdp->cpu_no_qs.b.norm &&
-           rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr))
+       if (rdp->cpu_no_qs.b.norm)
                return;
 
        /*
@@ -3480,9 +3577,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
            rdp->core_needs_qs && rdp->cpu_no_qs.b.norm &&
            rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) {
                rdp->n_rp_core_needs_qs++;
-       } else if (rdp->core_needs_qs &&
-                  (!rdp->cpu_no_qs.b.norm ||
-                   rdp->rcu_qs_ctr_snap != __this_cpu_read(rcu_qs_ctr))) {
+       } else if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm) {
                rdp->n_rp_report_qs++;
                return 1;
        }
@@ -3748,7 +3843,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
        rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
        rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
        WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
-       WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
+       WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
        rdp->cpu = cpu;
        rdp->rsp = rsp;
        rcu_boot_init_nocb_percpu_data(rdp);
@@ -3765,7 +3860,6 @@ static void
 rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 {
        unsigned long flags;
-       unsigned long mask;
        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
        struct rcu_node *rnp = rcu_get_root(rsp);
 
@@ -3778,8 +3872,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
                init_callback_list(rdp);  /* Re-enable callbacks on this CPU. */
        rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
        rcu_sysidle_init_percpu_data(rdp->dynticks);
-       atomic_set(&rdp->dynticks->dynticks,
-                  (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
+       rcu_dynticks_eqs_online();
        raw_spin_unlock_rcu_node(rnp);          /* irqs remain disabled. */
 
        /*
@@ -3788,7 +3881,6 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
         * of the next grace period.
         */
        rnp = rdp->mynode;
-       mask = rdp->grpmask;
        raw_spin_lock_rcu_node(rnp);            /* irqs already disabled. */
        if (!rdp->beenonline)
                WRITE_ONCE(rsp->ncpus, READ_ONCE(rsp->ncpus) + 1);
@@ -3872,7 +3964,7 @@ void rcu_cpu_starting(unsigned int cpu)
        struct rcu_state *rsp;
 
        for_each_rcu_flavor(rsp) {
-               rdp = this_cpu_ptr(rsp->rda);
+               rdp = per_cpu_ptr(rsp->rda, cpu);
                rnp = rdp->mynode;
                mask = rdp->grpmask;
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
index fe98dd24adf895216b52c80bd102a561368df7bb..b60f2b6caa1443495f0609eae89b8762a3c89359 100644 (file)
@@ -521,7 +521,6 @@ struct rcu_state {
        struct mutex exp_mutex;                 /* Serialize expedited GP. */
        struct mutex exp_wake_mutex;            /* Serialize wakeup. */
        unsigned long expedited_sequence;       /* Take a ticket. */
-       atomic_long_t expedited_normal;         /* # fallbacks to normal. */
        atomic_t expedited_need_qs;             /* # CPUs left to check in. */
        struct swait_queue_head expedited_wq;   /* Wait for check-ins. */
        int ncpus_snap;                         /* # CPUs seen last time. */
@@ -595,6 +594,8 @@ extern struct rcu_state rcu_bh_state;
 extern struct rcu_state rcu_preempt_state;
 #endif /* #ifdef CONFIG_PREEMPT_RCU */
 
+int rcu_dynticks_snap(struct rcu_dynticks *rdtp);
+
 #ifdef CONFIG_RCU_BOOST
 DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
@@ -687,18 +688,6 @@ static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
 }
 #endif /* #ifdef CONFIG_RCU_TRACE */
 
-/*
- * Place this after a lock-acquisition primitive to guarantee that
- * an UNLOCK+LOCK pair act as a full barrier.  This guarantee applies
- * if the UNLOCK and LOCK are executed by the same CPU or if the
- * UNLOCK and LOCK operate on the same lock variable.
- */
-#ifdef CONFIG_PPC
-#define smp_mb__after_unlock_lock()    smp_mb()  /* Full ordering for lock. */
-#else /* #ifdef CONFIG_PPC */
-#define smp_mb__after_unlock_lock()    do { } while (0)
-#endif /* #else #ifdef CONFIG_PPC */
-
 /*
  * Wrappers for the rcu_node::lock acquire and release.
  *
index e59e1849b89aca14797999deb3e9e91bdd9b78c2..a7b639ccd46e0ade81946639ef1d50bdc2b68d21 100644 (file)
  * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
  */
 
-/* Wrapper functions for expedited grace periods.  */
+/*
+ * Record the start of an expedited grace period.
+ */
 static void rcu_exp_gp_seq_start(struct rcu_state *rsp)
 {
        rcu_seq_start(&rsp->expedited_sequence);
 }
+
+/*
+ * Record the end of an expedited grace period.
+ */
 static void rcu_exp_gp_seq_end(struct rcu_state *rsp)
 {
        rcu_seq_end(&rsp->expedited_sequence);
        smp_mb(); /* Ensure that consecutive grace periods serialize. */
 }
+
+/*
+ * Take a snapshot of the expedited-grace-period counter.
+ */
 static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
 {
        unsigned long s;
@@ -39,6 +49,12 @@ static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
        trace_rcu_exp_grace_period(rsp->name, s, TPS("snap"));
        return s;
 }
+
+/*
+ * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
+ * if a full expedited grace period has elapsed since that snapshot
+ * was taken.
+ */
 static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
 {
        return rcu_seq_done(&rsp->expedited_sequence, s);
@@ -356,12 +372,11 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
                mask_ofl_test = 0;
                for_each_leaf_node_possible_cpu(rnp, cpu) {
                        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
-                       struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
 
                        rdp->exp_dynticks_snap =
-                               atomic_add_return(0, &rdtp->dynticks);
+                               rcu_dynticks_snap(rdp->dynticks);
                        if (raw_smp_processor_id() == cpu ||
-                           !(rdp->exp_dynticks_snap & 0x1) ||
+                           rcu_dynticks_in_eqs(rdp->exp_dynticks_snap) ||
                            !(rnp->qsmaskinitnext & rdp->grpmask))
                                mask_ofl_test |= rdp->grpmask;
                }
@@ -380,13 +395,12 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
                for_each_leaf_node_possible_cpu(rnp, cpu) {
                        unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
                        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
-                       struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
 
                        if (!(mask_ofl_ipi & mask))
                                continue;
 retry_ipi:
-                       if (atomic_add_return(0, &rdtp->dynticks) !=
-                           rdp->exp_dynticks_snap) {
+                       if (rcu_dynticks_in_eqs_since(rdp->dynticks,
+                                                     rdp->exp_dynticks_snap)) {
                                mask_ofl_test |= mask;
                                continue;
                        }
@@ -623,6 +637,11 @@ void synchronize_sched_expedited(void)
 {
        struct rcu_state *rsp = &rcu_sched_state;
 
+       RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
+                        lock_is_held(&rcu_lock_map) ||
+                        lock_is_held(&rcu_sched_lock_map),
+                        "Illegal synchronize_sched_expedited() in RCU read-side critical section");
+
        /* If only one CPU, this is automatically a grace period. */
        if (rcu_blocking_is_gp())
                return;
@@ -692,6 +711,11 @@ void synchronize_rcu_expedited(void)
 {
        struct rcu_state *rsp = rcu_state_p;
 
+       RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
+                        lock_is_held(&rcu_lock_map) ||
+                        lock_is_held(&rcu_sched_lock_map),
+                        "Illegal synchronize_rcu_expedited() in RCU read-side critical section");
+
        if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
                return;
        _synchronize_rcu_expedited(rsp, sync_rcu_exp_handler);
index 56583e764ebf398a7b14f442f63ce6f707f046e4..a240f3308be61cac3d6d5c1a2e1b447b1c4a20cf 100644 (file)
@@ -1643,7 +1643,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
               "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
               "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
               ticks_value, ticks_title,
-              atomic_read(&rdtp->dynticks) & 0xfff,
+              rcu_dynticks_snap(rdtp) & 0xfff,
               rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
               rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
               READ_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
@@ -2366,8 +2366,9 @@ static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp)
        }
 
        /*
-        * Each pass through this loop sets up one rcu_data structure and
-        * spawns one rcu_nocb_kthread().
+        * Each pass through this loop sets up one rcu_data structure.
+        * Should the corresponding CPU come online in the future, then
+        * we will spawn the needed set of rcu_nocb_kthread() kthreads.
         */
        for_each_cpu(cpu, rcu_nocb_mask) {
                rdp = per_cpu_ptr(rsp->rda, cpu);
index b1f28972872cb1fe3edd4a835ad3cbb5d692886a..8751a748499a3d3a93419fe5273bbf4876aa2149 100644 (file)
@@ -124,7 +124,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
                   rdp->rcu_qs_ctr_snap == per_cpu(rcu_qs_ctr, rdp->cpu),
                   rdp->core_needs_qs);
        seq_printf(m, " dt=%d/%llx/%d df=%lu",
-                  atomic_read(&rdp->dynticks->dynticks),
+                  rcu_dynticks_snap(rdp->dynticks),
                   rdp->dynticks->dynticks_nesting,
                   rdp->dynticks->dynticks_nmi_nesting,
                   rdp->dynticks_fqs);
@@ -194,9 +194,8 @@ static int show_rcuexp(struct seq_file *m, void *v)
                s2 += atomic_long_read(&rdp->exp_workdone2);
                s3 += atomic_long_read(&rdp->exp_workdone3);
        }
-       seq_printf(m, "s=%lu wd0=%lu wd1=%lu wd2=%lu wd3=%lu n=%lu enq=%d sc=%lu\n",
+       seq_printf(m, "s=%lu wd0=%lu wd1=%lu wd2=%lu wd3=%lu enq=%d sc=%lu\n",
                   rsp->expedited_sequence, s0, s1, s2, s3,
-                  atomic_long_read(&rsp->expedited_normal),
                   atomic_read(&rsp->expedited_need_qs),
                   rsp->expedited_sequence / 2);
        return 0;
index 4f6db7e6a1179ee00c99f62d854a39b00c959d2b..9e03db9ea9c09c422625537c80a16a020fe0bda0 100644 (file)
@@ -132,8 +132,7 @@ bool rcu_gp_is_normal(void)
 }
 EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
 
-static atomic_t rcu_expedited_nesting =
-       ATOMIC_INIT(IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT) ? 1 : 0);
+static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);
 
 /*
  * Should normal grace-period primitives be expedited?  Intended for
@@ -182,8 +181,7 @@ EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
  */
 void rcu_end_inkernel_boot(void)
 {
-       if (IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT))
-               rcu_unexpedite_gp();
+       rcu_unexpedite_gp();
        if (rcu_normal_after_boot)
                WRITE_ONCE(rcu_normal, 1);
 }
index 5e59b832ae2b4b7447de17c105da19890763826d..89ab6758667bc12ee359cd2c4aab481a925b22fb 100644 (file)
@@ -18,8 +18,8 @@ endif
 obj-y += core.o loadavg.o clock.o cputime.o
 obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
 obj-y += wait.o swait.o completion.o idle.o
-obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
-obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
+obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o
+obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
 obj-$(CONFIG_SCHED_DEBUG) += debug.o
 obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
index e85a725e5c3496687cccffa196372011f75ef2ad..ad64efe41722bef0e3a056386f677de8ecd3eda9 100644 (file)
@@ -77,41 +77,88 @@ EXPORT_SYMBOL_GPL(sched_clock);
 
 __read_mostly int sched_clock_running;
 
+void sched_clock_init(void)
+{
+       sched_clock_running = 1;
+}
+
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-static struct static_key __sched_clock_stable = STATIC_KEY_INIT;
-static int __sched_clock_stable_early;
+/*
+ * We must start with !__sched_clock_stable because the unstable -> stable
+ * transition is accurate, while the stable -> unstable transition is not.
+ *
+ * Similarly we start with __sched_clock_stable_early, thereby assuming we
+ * will become stable, such that there's only a single 1 -> 0 transition.
+ */
+static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable);
+static int __sched_clock_stable_early = 1;
 
-int sched_clock_stable(void)
+/*
+ * We want: ktime_get_ns() + gtod_offset == sched_clock() + raw_offset
+ */
+static __read_mostly u64 raw_offset;
+static __read_mostly u64 gtod_offset;
+
+struct sched_clock_data {
+       u64                     tick_raw;
+       u64                     tick_gtod;
+       u64                     clock;
+};
+
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
+
+static inline struct sched_clock_data *this_scd(void)
 {
-       return static_key_false(&__sched_clock_stable);
+       return this_cpu_ptr(&sched_clock_data);
 }
 
-static void __set_sched_clock_stable(void)
+static inline struct sched_clock_data *cpu_sdc(int cpu)
 {
-       if (!sched_clock_stable())
-               static_key_slow_inc(&__sched_clock_stable);
+       return &per_cpu(sched_clock_data, cpu);
+}
 
-       tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
+int sched_clock_stable(void)
+{
+       return static_branch_likely(&__sched_clock_stable);
 }
 
-void set_sched_clock_stable(void)
+static void __set_sched_clock_stable(void)
 {
-       __sched_clock_stable_early = 1;
+       struct sched_clock_data *scd = this_scd();
 
-       smp_mb(); /* matches sched_clock_init() */
+       /*
+        * Attempt to make the (initial) unstable->stable transition continuous.
+        */
+       raw_offset = (scd->tick_gtod + gtod_offset) - (scd->tick_raw);
 
-       if (!sched_clock_running)
-               return;
+       printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n",
+                       scd->tick_gtod, gtod_offset,
+                       scd->tick_raw,  raw_offset);
 
-       __set_sched_clock_stable();
+       static_branch_enable(&__sched_clock_stable);
+       tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
 }
 
 static void __clear_sched_clock_stable(struct work_struct *work)
 {
-       /* XXX worry about clock continuity */
-       if (sched_clock_stable())
-               static_key_slow_dec(&__sched_clock_stable);
+       struct sched_clock_data *scd = this_scd();
+
+       /*
+        * Attempt to make the stable->unstable transition continuous.
+        *
+        * Trouble is, this is typically called from the TSC watchdog
+        * timer, which is late per definition. This means the tick
+        * values can already be screwy.
+        *
+        * Still do what we can.
+        */
+       gtod_offset = (scd->tick_raw + raw_offset) - (scd->tick_gtod);
+
+       printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n",
+                       scd->tick_gtod, gtod_offset,
+                       scd->tick_raw,  raw_offset);
 
+       static_branch_disable(&__sched_clock_stable);
        tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);
 }
 
@@ -121,47 +168,15 @@ void clear_sched_clock_stable(void)
 {
        __sched_clock_stable_early = 0;
 
-       smp_mb(); /* matches sched_clock_init() */
-
-       if (!sched_clock_running)
-               return;
+       smp_mb(); /* matches sched_clock_init_late() */
 
-       schedule_work(&sched_clock_work);
+       if (sched_clock_running == 2)
+               schedule_work(&sched_clock_work);
 }
 
-struct sched_clock_data {
-       u64                     tick_raw;
-       u64                     tick_gtod;
-       u64                     clock;
-};
-
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
-
-static inline struct sched_clock_data *this_scd(void)
+void sched_clock_init_late(void)
 {
-       return this_cpu_ptr(&sched_clock_data);
-}
-
-static inline struct sched_clock_data *cpu_sdc(int cpu)
-{
-       return &per_cpu(sched_clock_data, cpu);
-}
-
-void sched_clock_init(void)
-{
-       u64 ktime_now = ktime_to_ns(ktime_get());
-       int cpu;
-
-       for_each_possible_cpu(cpu) {
-               struct sched_clock_data *scd = cpu_sdc(cpu);
-
-               scd->tick_raw = 0;
-               scd->tick_gtod = ktime_now;
-               scd->clock = ktime_now;
-       }
-
-       sched_clock_running = 1;
-
+       sched_clock_running = 2;
        /*
         * Ensure that it is impossible to not do a static_key update.
         *
@@ -173,8 +188,6 @@ void sched_clock_init(void)
 
        if (__sched_clock_stable_early)
                __set_sched_clock_stable();
-       else
-               __clear_sched_clock_stable(NULL);
 }
 
 /*
@@ -216,7 +229,7 @@ again:
         *                    scd->tick_gtod + TICK_NSEC);
         */
 
-       clock = scd->tick_gtod + delta;
+       clock = scd->tick_gtod + gtod_offset + delta;
        min_clock = wrap_max(scd->tick_gtod, old_clock);
        max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);
 
@@ -302,7 +315,7 @@ u64 sched_clock_cpu(int cpu)
        u64 clock;
 
        if (sched_clock_stable())
-               return sched_clock();
+               return sched_clock() + raw_offset;
 
        if (unlikely(!sched_clock_running))
                return 0ull;
@@ -323,23 +336,22 @@ EXPORT_SYMBOL_GPL(sched_clock_cpu);
 void sched_clock_tick(void)
 {
        struct sched_clock_data *scd;
-       u64 now, now_gtod;
-
-       if (sched_clock_stable())
-               return;
-
-       if (unlikely(!sched_clock_running))
-               return;
 
        WARN_ON_ONCE(!irqs_disabled());
 
+       /*
+        * Update these values even if sched_clock_stable(), because it can
+        * become unstable at any point in time at which point we need some
+        * values to fall back on.
+        *
+        * XXX arguably we can skip this if we expose tsc_clocksource_reliable
+        */
        scd = this_scd();
-       now_gtod = ktime_to_ns(ktime_get());
-       now = sched_clock();
+       scd->tick_raw  = sched_clock();
+       scd->tick_gtod = ktime_get_ns();
 
-       scd->tick_raw = now;
-       scd->tick_gtod = now_gtod;
-       sched_clock_local(scd);
+       if (!sched_clock_stable() && likely(sched_clock_running))
+               sched_clock_local(scd);
 }
 
 /*
@@ -366,11 +378,6 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
 #else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
-void sched_clock_init(void)
-{
-       sched_clock_running = 1;
-}
-
 u64 sched_clock_cpu(int cpu)
 {
        if (unlikely(!sched_clock_running))
@@ -378,6 +385,7 @@ u64 sched_clock_cpu(int cpu)
 
        return sched_clock();
 }
+
 #endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
 /*
index 8d0f35debf35657689908a4b37df7230ba7d6710..f063a25d44493fd79dfb52aece2cc3ca2f907a07 100644 (file)
@@ -31,7 +31,8 @@ void complete(struct completion *x)
        unsigned long flags;
 
        spin_lock_irqsave(&x->wait.lock, flags);
-       x->done++;
+       if (x->done != UINT_MAX)
+               x->done++;
        __wake_up_locked(&x->wait, TASK_NORMAL, 1);
        spin_unlock_irqrestore(&x->wait.lock, flags);
 }
@@ -51,7 +52,7 @@ void complete_all(struct completion *x)
        unsigned long flags;
 
        spin_lock_irqsave(&x->wait.lock, flags);
-       x->done += UINT_MAX/2;
+       x->done = UINT_MAX;
        __wake_up_locked(&x->wait, TASK_NORMAL, 0);
        spin_unlock_irqrestore(&x->wait.lock, flags);
 }
@@ -79,7 +80,8 @@ do_wait_for_common(struct completion *x,
                if (!x->done)
                        return timeout;
        }
-       x->done--;
+       if (x->done != UINT_MAX)
+               x->done--;
        return timeout ?: 1;
 }
 
@@ -280,7 +282,7 @@ bool try_wait_for_completion(struct completion *x)
        spin_lock_irqsave(&x->wait.lock, flags);
        if (!x->done)
                ret = 0;
-       else
+       else if (x->done != UINT_MAX)
                x->done--;
        spin_unlock_irqrestore(&x->wait.lock, flags);
        return ret;
index c56fb57f2991ef4f2a68395c534b32d3053ae208..34e2291a9a6c163be3dfd774ed110bf07ab0e890 100644 (file)
@@ -1,88 +1,28 @@
 /*
  *  kernel/sched/core.c
  *
- *  Kernel scheduler and related syscalls
+ *  Core kernel scheduler code and related syscalls
  *
  *  Copyright (C) 1991-2002  Linus Torvalds
- *
- *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
- *             make semaphores SMP safe
- *  1998-11-19 Implemented schedule_timeout() and related stuff
- *             by Andrea Arcangeli
- *  2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
- *             hybrid priority-list and round-robin design with
- *             an array-switch method of distributing timeslices
- *             and per-CPU runqueues.  Cleanups and useful suggestions
- *             by Davide Libenzi, preemptible kernel bits by Robert Love.
- *  2003-09-03 Interactivity tuning by Con Kolivas.
- *  2004-04-02 Scheduler domains code by Nick Piggin
- *  2007-04-15  Work begun on replacing all interactivity tuning with a
- *              fair scheduling design by Con Kolivas.
- *  2007-05-05  Load balancing (smp-nice) and other improvements
- *              by Peter Williams
- *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
- *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
- *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
- *              Thomas Gleixner, Mike Kravetz
  */
-
-#include <linux/kasan.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/nmi.h>
-#include <linux/init.h>
-#include <linux/uaccess.h>
-#include <linux/highmem.h>
-#include <linux/mmu_context.h>
-#include <linux/interrupt.h>
-#include <linux/capability.h>
-#include <linux/completion.h>
-#include <linux/kernel_stat.h>
-#include <linux/debug_locks.h>
-#include <linux/perf_event.h>
-#include <linux/security.h>
-#include <linux/notifier.h>
-#include <linux/profile.h>
-#include <linux/freezer.h>
-#include <linux/vmalloc.h>
-#include <linux/blkdev.h>
-#include <linux/delay.h>
-#include <linux/pid_namespace.h>
-#include <linux/smp.h>
-#include <linux/threads.h>
-#include <linux/timer.h>
-#include <linux/rcupdate.h>
-#include <linux/cpu.h>
+#include <linux/sched.h>
 #include <linux/cpuset.h>
-#include <linux/percpu.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/sysctl.h>
-#include <linux/syscalls.h>
-#include <linux/times.h>
-#include <linux/tsacct_kern.h>
-#include <linux/kprobes.h>
 #include <linux/delayacct.h>
-#include <linux/unistd.h>
-#include <linux/pagemap.h>
-#include <linux/hrtimer.h>
-#include <linux/tick.h>
-#include <linux/ctype.h>
-#include <linux/ftrace.h>
-#include <linux/slab.h>
 #include <linux/init_task.h>
 #include <linux/context_tracking.h>
-#include <linux/compiler.h>
-#include <linux/frame.h>
+
+#include <linux/blkdev.h>
+#include <linux/kprobes.h>
+#include <linux/mmu_context.h>
+#include <linux/module.h>
+#include <linux/nmi.h>
 #include <linux/prefetch.h>
-#include <linux/mutex.h>
+#include <linux/profile.h>
+#include <linux/security.h>
+#include <linux/syscalls.h>
 
 #include <asm/switch_to.h>
 #include <asm/tlb.h>
-#include <asm/irq_regs.h>
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#endif
 
 #include "sched.h"
 #include "../workqueue_internal.h"
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
 
-DEFINE_MUTEX(sched_domains_mutex);
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
-static void update_rq_clock_task(struct rq *rq, s64 delta);
-
-void update_rq_clock(struct rq *rq)
-{
-       s64 delta;
-
-       lockdep_assert_held(&rq->lock);
-
-       if (rq->clock_skip_update & RQCF_ACT_SKIP)
-               return;
-
-       delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
-       if (delta < 0)
-               return;
-       rq->clock += delta;
-       update_rq_clock_task(rq, delta);
-}
-
 /*
  * Debugging: various feature bits
  */
@@ -140,7 +61,7 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
 const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
 
 /*
- * period over which we measure -rt task cpu usage in us.
+ * period over which we measure -rt task CPU usage in us.
  * default: 1s
  */
 unsigned int sysctl_sched_rt_period = 1000000;
@@ -153,7 +74,7 @@ __read_mostly int scheduler_running;
  */
 int sysctl_sched_rt_runtime = 950000;
 
-/* cpus with isolated domains */
+/* CPUs with isolated domains */
 cpumask_var_t cpu_isolated_map;
 
 /*
@@ -185,7 +106,7 @@ struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
                rq = task_rq(p);
                raw_spin_lock(&rq->lock);
                if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
-                       rf->cookie = lockdep_pin_lock(&rq->lock);
+                       rq_pin_lock(rq, rf);
                        return rq;
                }
                raw_spin_unlock(&rq->lock);
@@ -221,11 +142,11 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
                 * If we observe the old cpu in task_rq_lock, the acquire of
                 * the old rq->lock will fully serialize against the stores.
                 *
-                * If we observe the new cpu in task_rq_lock, the acquire will
+                * If we observe the new CPU in task_rq_lock, the acquire will
                 * pair with the WMB to ensure we must then also see migrating.
                 */
                if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
-                       rf->cookie = lockdep_pin_lock(&rq->lock);
+                       rq_pin_lock(rq, rf);
                        return rq;
                }
                raw_spin_unlock(&rq->lock);
@@ -236,6 +157,84 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
        }
 }
 
+/*
+ * RQ-clock updating methods:
+ */
+
+static void update_rq_clock_task(struct rq *rq, s64 delta)
+{
+/*
+ * In theory, the compile should just see 0 here, and optimize out the call
+ * to sched_rt_avg_update. But I don't trust it...
+ */
+#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+       s64 steal = 0, irq_delta = 0;
+#endif
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+       irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
+
+       /*
+        * Since irq_time is only updated on {soft,}irq_exit, we might run into
+        * this case when a previous update_rq_clock() happened inside a
+        * {soft,}irq region.
+        *
+        * When this happens, we stop ->clock_task and only update the
+        * prev_irq_time stamp to account for the part that fit, so that a next
+        * update will consume the rest. This ensures ->clock_task is
+        * monotonic.
+        *
+        * It does however cause some slight miss-attribution of {soft,}irq
+        * time, a more accurate solution would be to update the irq_time using
+        * the current rq->clock timestamp, except that would require using
+        * atomic ops.
+        */
+       if (irq_delta > delta)
+               irq_delta = delta;
+
+       rq->prev_irq_time += irq_delta;
+       delta -= irq_delta;
+#endif
+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+       if (static_key_false((&paravirt_steal_rq_enabled))) {
+               steal = paravirt_steal_clock(cpu_of(rq));
+               steal -= rq->prev_steal_time_rq;
+
+               if (unlikely(steal > delta))
+                       steal = delta;
+
+               rq->prev_steal_time_rq += steal;
+               delta -= steal;
+       }
+#endif
+
+       rq->clock_task += delta;
+
+#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+       if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
+               sched_rt_avg_update(rq, irq_delta + steal);
+#endif
+}
+
+void update_rq_clock(struct rq *rq)
+{
+       s64 delta;
+
+       lockdep_assert_held(&rq->lock);
+
+       if (rq->clock_update_flags & RQCF_ACT_SKIP)
+               return;
+
+#ifdef CONFIG_SCHED_DEBUG
+       rq->clock_update_flags |= RQCF_UPDATED;
+#endif
+       delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
+       if (delta < 0)
+               return;
+       rq->clock += delta;
+       update_rq_clock_task(rq, delta);
+}
+
+
 #ifdef CONFIG_SCHED_HRTICK
 /*
  * Use HR-timers to deliver accurate preemption points.
@@ -458,7 +457,7 @@ void wake_up_q(struct wake_q_head *head)
 
                task = container_of(node, struct task_struct, wake_q);
                BUG_ON(!task);
-               /* task can safely be re-inserted now */
+               /* Task can safely be re-inserted now: */
                node = node->next;
                task->wake_q.next = NULL;
 
@@ -516,12 +515,12 @@ void resched_cpu(int cpu)
 #ifdef CONFIG_SMP
 #ifdef CONFIG_NO_HZ_COMMON
 /*
- * In the semi idle case, use the nearest busy cpu for migrating timers
- * from an idle cpu.  This is good for power-savings.
+ * In the semi idle case, use the nearest busy CPU for migrating timers
+ * from an idle CPU.  This is good for power-savings.
  *
  * We don't do similar optimization for completely idle system, as
- * selecting an idle cpu will add more delays to the timers than intended
- * (as that cpu's timer base may not be uptodate wrt jiffies etc).
+ * selecting an idle CPU will add more delays to the timers than intended
+ * (as that CPU's timer base may not be uptodate wrt jiffies etc).
  */
 int get_nohz_timer_target(void)
 {
@@ -550,6 +549,7 @@ unlock:
        rcu_read_unlock();
        return cpu;
 }
+
 /*
  * When add_timer_on() enqueues a timer into the timer wheel of an
  * idle CPU then this timer might expire before the next timer event
@@ -784,60 +784,6 @@ void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
        dequeue_task(rq, p, flags);
 }
 
-static void update_rq_clock_task(struct rq *rq, s64 delta)
-{
-/*
- * In theory, the compile should just see 0 here, and optimize out the call
- * to sched_rt_avg_update. But I don't trust it...
- */
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
-       s64 steal = 0, irq_delta = 0;
-#endif
-#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-       irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
-
-       /*
-        * Since irq_time is only updated on {soft,}irq_exit, we might run into
-        * this case when a previous update_rq_clock() happened inside a
-        * {soft,}irq region.
-        *
-        * When this happens, we stop ->clock_task and only update the
-        * prev_irq_time stamp to account for the part that fit, so that a next
-        * update will consume the rest. This ensures ->clock_task is
-        * monotonic.
-        *
-        * It does however cause some slight miss-attribution of {soft,}irq
-        * time, a more accurate solution would be to update the irq_time using
-        * the current rq->clock timestamp, except that would require using
-        * atomic ops.
-        */
-       if (irq_delta > delta)
-               irq_delta = delta;
-
-       rq->prev_irq_time += irq_delta;
-       delta -= irq_delta;
-#endif
-#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
-       if (static_key_false((&paravirt_steal_rq_enabled))) {
-               steal = paravirt_steal_clock(cpu_of(rq));
-               steal -= rq->prev_steal_time_rq;
-
-               if (unlikely(steal > delta))
-                       steal = delta;
-
-               rq->prev_steal_time_rq += steal;
-               delta -= steal;
-       }
-#endif
-
-       rq->clock_task += delta;
-
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
-       if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
-               sched_rt_avg_update(rq, irq_delta + steal);
-#endif
-}
-
 void sched_set_stop_task(int cpu, struct task_struct *stop)
 {
        struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
@@ -1018,7 +964,7 @@ struct migration_arg {
 };
 
 /*
- * Move (not current) task off this cpu, onto dest cpu. We're doing
+ * Move (not current) task off this CPU, onto the destination CPU. We're doing
  * this because either it can't run here any more (set_cpus_allowed()
  * away from this CPU, or CPU going down), or because we're
  * attempting to rebalance this task on exec (sched_exec).
@@ -1052,8 +998,8 @@ static int migration_cpu_stop(void *data)
        struct rq *rq = this_rq();
 
        /*
-        * The original target cpu might have gone down and we might
-        * be on another cpu but it doesn't matter.
+        * The original target CPU might have gone down and we might
+        * be on another CPU but it doesn't matter.
         */
        local_irq_disable();
        /*
@@ -1171,7 +1117,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
        if (p->flags & PF_KTHREAD) {
                /*
                 * For kernel threads that do indeed end up on online &&
-                * !active we want to ensure they are strict per-cpu threads.
+                * !active we want to ensure they are strict per-CPU threads.
                 */
                WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
                        !cpumask_intersects(new_mask, cpu_active_mask) &&
@@ -1195,9 +1141,9 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
                 * OK, since we're going to drop the lock immediately
                 * afterwards anyway.
                 */
-               lockdep_unpin_lock(&rq->lock, rf.cookie);
+               rq_unpin_lock(rq, &rf);
                rq = move_queued_task(rq, p, dest_cpu);
-               lockdep_repin_lock(&rq->lock, rf.cookie);
+               rq_repin_lock(rq, &rf);
        }
 out:
        task_rq_unlock(rq, p, &rf);
@@ -1276,7 +1222,7 @@ static void __migrate_swap_task(struct task_struct *p, int cpu)
                /*
                 * Task isn't running anymore; make it appear like we migrated
                 * it before it went to sleep. This means on wakeup we make the
-                * previous cpu our target instead of where it really is.
+                * previous CPU our target instead of where it really is.
                 */
                p->wake_cpu = cpu;
        }
@@ -1508,12 +1454,12 @@ EXPORT_SYMBOL_GPL(kick_process);
  *
  *  - on cpu-up we allow per-cpu kthreads on the online && !active cpu,
  *    see __set_cpus_allowed_ptr(). At this point the newly online
- *    cpu isn't yet part of the sched domains, and balancing will not
+ *    CPU isn't yet part of the sched domains, and balancing will not
  *    see it.
  *
- *  - on cpu-down we clear cpu_active() to mask the sched domains and
+ *  - on CPU-down we clear cpu_active() to mask the sched domains and
  *    avoid the load balancer to place new tasks on the to be removed
- *    cpu. Existing tasks will remain running there and will be taken
+ *    CPU. Existing tasks will remain running there and will be taken
  *    off.
  *
  * This means that fallback selection must not select !active CPUs.
@@ -1529,9 +1475,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
        int dest_cpu;
 
        /*
-        * If the node that the cpu is on has been offlined, cpu_to_node()
-        * will return -1. There is no cpu on the node, and we should
-        * select the cpu on the other node.
+        * If the node that the CPU is on has been offlined, cpu_to_node()
+        * will return -1. There is no CPU on the node, and we should
+        * select the CPU on the other node.
         */
        if (nid != -1) {
                nodemask = cpumask_of_node(nid);
@@ -1563,7 +1509,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
                                state = possible;
                                break;
                        }
-                       /* fall-through */
+                       /* Fall-through */
                case possible:
                        do_set_cpus_allowed(p, cpu_possible_mask);
                        state = fail;
@@ -1607,7 +1553,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
        /*
         * In order not to call set_task_cpu() on a blocking task we need
         * to rely on ttwu() to place the task on a valid ->cpus_allowed
-        * cpu.
+        * CPU.
         *
         * Since this is common to all placement strategies, this lives here.
         *
@@ -1681,7 +1627,7 @@ static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_fl
        activate_task(rq, p, en_flags);
        p->on_rq = TASK_ON_RQ_QUEUED;
 
-       /* if a worker is waking up, notify workqueue */
+       /* If a worker is waking up, notify the workqueue: */
        if (p->flags & PF_WQ_WORKER)
                wq_worker_waking_up(p, cpu_of(rq));
 }
@@ -1690,7 +1636,7 @@ static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_fl
  * Mark the task runnable and perform wakeup-preemption.
  */
 static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
-                          struct pin_cookie cookie)
+                          struct rq_flags *rf)
 {
        check_preempt_curr(rq, p, wake_flags);
        p->state = TASK_RUNNING;
@@ -1702,9 +1648,9 @@ static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
                 * Our task @p is fully woken up and running; so its safe to
                 * drop the rq->lock, hereafter rq is only used for statistics.
                 */
-               lockdep_unpin_lock(&rq->lock, cookie);
+               rq_unpin_lock(rq, rf);
                p->sched_class->task_woken(rq, p);
-               lockdep_repin_lock(&rq->lock, cookie);
+               rq_repin_lock(rq, rf);
        }
 
        if (rq->idle_stamp) {
@@ -1723,7 +1669,7 @@ static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
 
 static void
 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
-                struct pin_cookie cookie)
+                struct rq_flags *rf)
 {
        int en_flags = ENQUEUE_WAKEUP;
 
@@ -1738,7 +1684,7 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
 #endif
 
        ttwu_activate(rq, p, en_flags);
-       ttwu_do_wakeup(rq, p, wake_flags, cookie);
+       ttwu_do_wakeup(rq, p, wake_flags, rf);
 }
 
 /*
@@ -1757,7 +1703,7 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
        if (task_on_rq_queued(p)) {
                /* check_preempt_curr() may use rq clock */
                update_rq_clock(rq);
-               ttwu_do_wakeup(rq, p, wake_flags, rf.cookie);
+               ttwu_do_wakeup(rq, p, wake_flags, &rf);
                ret = 1;
        }
        __task_rq_unlock(rq, &rf);
@@ -1770,15 +1716,15 @@ void sched_ttwu_pending(void)
 {
        struct rq *rq = this_rq();
        struct llist_node *llist = llist_del_all(&rq->wake_list);
-       struct pin_cookie cookie;
        struct task_struct *p;
        unsigned long flags;
+       struct rq_flags rf;
 
        if (!llist)
                return;
 
        raw_spin_lock_irqsave(&rq->lock, flags);
-       cookie = lockdep_pin_lock(&rq->lock);
+       rq_pin_lock(rq, &rf);
 
        while (llist) {
                int wake_flags = 0;
@@ -1789,10 +1735,10 @@ void sched_ttwu_pending(void)
                if (p->sched_remote_wakeup)
                        wake_flags = WF_MIGRATED;
 
-               ttwu_do_activate(rq, p, wake_flags, cookie);
+               ttwu_do_activate(rq, p, wake_flags, &rf);
        }
 
-       lockdep_unpin_lock(&rq->lock, cookie);
+       rq_unpin_lock(rq, &rf);
        raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
@@ -1864,7 +1810,7 @@ void wake_up_if_idle(int cpu)
                raw_spin_lock_irqsave(&rq->lock, flags);
                if (is_idle_task(rq->curr))
                        smp_send_reschedule(cpu);
-               /* Else cpu is not in idle, do nothing here */
+               /* Else CPU is not idle, do nothing here: */
                raw_spin_unlock_irqrestore(&rq->lock, flags);
        }
 
@@ -1881,20 +1827,20 @@ bool cpus_share_cache(int this_cpu, int that_cpu)
 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
 {
        struct rq *rq = cpu_rq(cpu);
-       struct pin_cookie cookie;
+       struct rq_flags rf;
 
 #if defined(CONFIG_SMP)
        if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
-               sched_clock_cpu(cpu); /* sync clocks x-cpu */
+               sched_clock_cpu(cpu); /* Sync clocks across CPUs */
                ttwu_queue_remote(p, cpu, wake_flags);
                return;
        }
 #endif
 
        raw_spin_lock(&rq->lock);
-       cookie = lockdep_pin_lock(&rq->lock);
-       ttwu_do_activate(rq, p, wake_flags, cookie);
-       lockdep_unpin_lock(&rq->lock, cookie);
+       rq_pin_lock(rq, &rf);
+       ttwu_do_activate(rq, p, wake_flags, &rf);
+       rq_unpin_lock(rq, &rf);
        raw_spin_unlock(&rq->lock);
 }
 
@@ -1904,8 +1850,8 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
  *  MIGRATION
  *
  * The basic program-order guarantee on SMP systems is that when a task [t]
- * migrates, all its activity on its old cpu [c0] happens-before any subsequent
- * execution on its new cpu [c1].
+ * migrates, all its activity on its old CPU [c0] happens-before any subsequent
+ * execution on its new CPU [c1].
  *
  * For migration (of runnable tasks) this is provided by the following means:
  *
@@ -1916,7 +1862,7 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
  *
  * Transitivity guarantees that B happens after A and C after B.
  * Note: we only require RCpc transitivity.
- * Note: the cpu doing B need not be c0 or c1
+ * Note: the CPU doing B need not be c0 or c1
  *
  * Example:
  *
@@ -2024,7 +1970,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 
        trace_sched_waking(p);
 
-       success = 1; /* we're going to change ->state */
+       /* We're going to change ->state: */
+       success = 1;
        cpu = task_cpu(p);
 
        /*
@@ -2073,7 +2020,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
        smp_rmb();
 
        /*
-        * If the owning (remote) cpu is still in the middle of schedule() with
+        * If the owning (remote) CPU is still in the middle of schedule() with
         * this task as prev, wait until its done referencing the task.
         *
         * Pairs with the smp_store_release() in finish_lock_switch().
@@ -2086,11 +2033,24 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
        p->sched_contributes_to_load = !!task_contributes_to_load(p);
        p->state = TASK_WAKING;
 
+       if (p->in_iowait) {
+               delayacct_blkio_end();
+               atomic_dec(&task_rq(p)->nr_iowait);
+       }
+
        cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
        if (task_cpu(p) != cpu) {
                wake_flags |= WF_MIGRATED;
                set_task_cpu(p, cpu);
        }
+
+#else /* CONFIG_SMP */
+
+       if (p->in_iowait) {
+               delayacct_blkio_end();
+               atomic_dec(&task_rq(p)->nr_iowait);
+       }
+
 #endif /* CONFIG_SMP */
 
        ttwu_queue(p, cpu, wake_flags);
@@ -2111,7 +2071,7 @@ out:
  * ensure that this_rq() is locked, @p is bound to this_rq() and not
  * the current task.
  */
-static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie)
+static void try_to_wake_up_local(struct task_struct *p, struct rq_flags *rf)
 {
        struct rq *rq = task_rq(p);
 
@@ -2128,11 +2088,11 @@ static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie
                 * disabled avoiding further scheduler activity on it and we've
                 * not yet picked a replacement task.
                 */
-               lockdep_unpin_lock(&rq->lock, cookie);
+               rq_unpin_lock(rq, rf);
                raw_spin_unlock(&rq->lock);
                raw_spin_lock(&p->pi_lock);
                raw_spin_lock(&rq->lock);
-               lockdep_repin_lock(&rq->lock, cookie);
+               rq_repin_lock(rq, rf);
        }
 
        if (!(p->state & TASK_NORMAL))
@@ -2140,10 +2100,15 @@ static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie
 
        trace_sched_waking(p);
 
-       if (!task_on_rq_queued(p))
+       if (!task_on_rq_queued(p)) {
+               if (p->in_iowait) {
+                       delayacct_blkio_end();
+                       atomic_dec(&rq->nr_iowait);
+               }
                ttwu_activate(rq, p, ENQUEUE_WAKEUP);
+       }
 
-       ttwu_do_wakeup(rq, p, 0, cookie);
+       ttwu_do_wakeup(rq, p, 0, rf);
        ttwu_stat(p, smp_processor_id(), 0);
 out:
        raw_spin_unlock(&p->pi_lock);
@@ -2427,7 +2392,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
         */
        raw_spin_lock_irqsave(&p->pi_lock, flags);
        /*
-        * We're setting the cpu for the first time, we don't migrate,
+        * We're setting the CPU for the first time, we don't migrate,
         * so use __set_task_cpu().
         */
        __set_task_cpu(p, cpu);
@@ -2570,7 +2535,7 @@ void wake_up_new_task(struct task_struct *p)
        /*
         * Fork balancing, do it here and not earlier because:
         *  - cpus_allowed can change in the fork path
-        *  - any previously selected cpu might disappear through hotplug
+        *  - any previously selected CPU might disappear through hotplug
         *
         * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
         * as we're not fully set-up yet.
@@ -2578,6 +2543,7 @@ void wake_up_new_task(struct task_struct *p)
        __set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
 #endif
        rq = __task_rq_lock(p, &rf);
+       update_rq_clock(rq);
        post_init_entity_util_avg(&p->se);
 
        activate_task(rq, p, 0);
@@ -2590,9 +2556,9 @@ void wake_up_new_task(struct task_struct *p)
                 * Nothing relies on rq->lock after this, so its fine to
                 * drop it.
                 */
-               lockdep_unpin_lock(&rq->lock, rf.cookie);
+               rq_unpin_lock(rq, &rf);
                p->sched_class->task_woken(rq, p);
-               lockdep_repin_lock(&rq->lock, rf.cookie);
+               rq_repin_lock(rq, &rf);
        }
 #endif
        task_rq_unlock(rq, p, &rf);
@@ -2861,7 +2827,7 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
  */
 static __always_inline struct rq *
 context_switch(struct rq *rq, struct task_struct *prev,
-              struct task_struct *next, struct pin_cookie cookie)
+              struct task_struct *next, struct rq_flags *rf)
 {
        struct mm_struct *mm, *oldmm;
 
@@ -2887,13 +2853,16 @@ context_switch(struct rq *rq, struct task_struct *prev,
                prev->active_mm = NULL;
                rq->prev_mm = oldmm;
        }
+
+       rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
+
        /*
         * Since the runqueue lock will be released by the next
         * task (which is an invalid locking op but in the case
         * of the scheduler it's an obvious special-case), so we
         * do an early lockdep release here:
         */
-       lockdep_unpin_lock(&rq->lock, cookie);
+       rq_unpin_lock(rq, rf);
        spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
 
        /* Here we just switch the register state and the stack. */
@@ -2920,7 +2889,7 @@ unsigned long nr_running(void)
 }
 
 /*
- * Check if only the current task is running on the cpu.
+ * Check if only the current task is running on the CPU.
  *
  * Caution: this function does not check that the caller has disabled
  * preemption, thus the result might have a time-of-check-to-time-of-use
@@ -2949,6 +2918,36 @@ unsigned long long nr_context_switches(void)
        return sum;
 }
 
+/*
+ * IO-wait accounting, and how it's mostly bollocks (on SMP).
+ *
+ * The idea behind IO-wait accounting is to account the idle time that we could
+ * have spent running if it were not for IO. That is, if we were to improve the
+ * storage performance, we'd have a proportional reduction in IO-wait time.
+ *
+ * This all works nicely on UP, where, when a task blocks on IO, we account
+ * idle time as IO-wait, because if the storage were faster, it could've been
+ * running and we'd not be idle.
+ *
+ * This has been extended to SMP, by doing the same for each CPU. This however
+ * is broken.
+ *
+ * Imagine for instance the case where two tasks block on one CPU, only the one
+ * CPU will have IO-wait accounted, while the other has regular idle. Even
+ * though, if the storage were faster, both could've run at the same time,
+ * utilising both CPUs.
+ *
+ * This means, that when looking globally, the current IO-wait accounting on
+ * SMP is a lower bound, by reason of under accounting.
+ *
+ * Worse, since the numbers are provided per CPU, they are sometimes
+ * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
+ * associated with any one particular CPU, it can wake to another CPU than it
+ * blocked on. This means the per CPU IO-wait number is meaningless.
+ *
+ * Task CPU affinities can make all that even more 'interesting'.
+ */
+
 unsigned long nr_iowait(void)
 {
        unsigned long i, sum = 0;
@@ -2959,6 +2958,13 @@ unsigned long nr_iowait(void)
        return sum;
 }
 
+/*
+ * Consumers of these two interfaces, like for example the cpufreq menu
+ * governor are using nonsensical data. Boosting frequency for a CPU that has
+ * IO-wait which might not even end up running the task when it does become
+ * runnable.
+ */
+
 unsigned long nr_iowait_cpu(int cpu)
 {
        struct rq *this = cpu_rq(cpu);
@@ -3042,8 +3048,8 @@ unsigned long long task_sched_runtime(struct task_struct *p)
         * So we have a optimization chance when the task's delta_exec is 0.
         * Reading ->on_cpu is racy, but this is ok.
         *
-        * If we race with it leaving cpu, we'll take a lock. So we're correct.
-        * If we race with it entering cpu, unaccounted time is 0. This is
+        * If we race with it leaving CPU, we'll take a lock. So we're correct.
+        * If we race with it entering CPU, unaccounted time is 0. This is
         * indistinguishable from the read occurring a few cycles earlier.
         * If we see ->on_cpu without ->on_rq, the task is leaving, and has
         * been accounted, so we're correct here as well.
@@ -3257,31 +3263,30 @@ static inline void schedule_debug(struct task_struct *prev)
  * Pick up the highest-prio task:
  */
 static inline struct task_struct *
-pick_next_task(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
+pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
-       const struct sched_class *class = &fair_sched_class;
+       const struct sched_class *class;
        struct task_struct *p;
 
        /*
         * Optimization: we know that if all tasks are in
         * the fair class we can call that function directly:
         */
-       if (likely(prev->sched_class == class &&
-                  rq->nr_running == rq->cfs.h_nr_running)) {
-               p = fair_sched_class.pick_next_task(rq, prev, cookie);
+       if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
+               p = fair_sched_class.pick_next_task(rq, prev, rf);
                if (unlikely(p == RETRY_TASK))
                        goto again;
 
-               /* assumes fair_sched_class->next == idle_sched_class */
+               /* Assumes fair_sched_class->next == idle_sched_class */
                if (unlikely(!p))
-                       p = idle_sched_class.pick_next_task(rq, prev, cookie);
+                       p = idle_sched_class.pick_next_task(rq, prev, rf);
 
                return p;
        }
 
 again:
        for_each_class(class) {
-               p = class->pick_next_task(rq, prev, cookie);
+               p = class->pick_next_task(rq, prev, rf);
                if (p) {
                        if (unlikely(p == RETRY_TASK))
                                goto again;
@@ -3289,7 +3294,8 @@ again:
                }
        }
 
-       BUG(); /* the idle class will always have a runnable task */
+       /* The idle class should always have a runnable task: */
+       BUG();
 }
 
 /*
@@ -3335,7 +3341,7 @@ static void __sched notrace __schedule(bool preempt)
 {
        struct task_struct *prev, *next;
        unsigned long *switch_count;
-       struct pin_cookie cookie;
+       struct rq_flags rf;
        struct rq *rq;
        int cpu;
 
@@ -3358,9 +3364,10 @@ static void __sched notrace __schedule(bool preempt)
         */
        smp_mb__before_spinlock();
        raw_spin_lock(&rq->lock);
-       cookie = lockdep_pin_lock(&rq->lock);
+       rq_pin_lock(rq, &rf);
 
-       rq->clock_skip_update <<= 1; /* promote REQ to ACT */
+       /* Promote REQ to ACT */
+       rq->clock_update_flags <<= 1;
 
        switch_count = &prev->nivcsw;
        if (!preempt && prev->state) {
@@ -3370,6 +3377,11 @@ static void __sched notrace __schedule(bool preempt)
                        deactivate_task(rq, prev, DEQUEUE_SLEEP);
                        prev->on_rq = 0;
 
+                       if (prev->in_iowait) {
+                               atomic_inc(&rq->nr_iowait);
+                               delayacct_blkio_start();
+                       }
+
                        /*
                         * If a worker went to sleep, notify and ask workqueue
                         * whether it wants to wake up a task to maintain
@@ -3380,7 +3392,7 @@ static void __sched notrace __schedule(bool preempt)
 
                                to_wakeup = wq_worker_sleeping(prev);
                                if (to_wakeup)
-                                       try_to_wake_up_local(to_wakeup, cookie);
+                                       try_to_wake_up_local(to_wakeup, &rf);
                        }
                }
                switch_count = &prev->nvcsw;
@@ -3389,10 +3401,9 @@ static void __sched notrace __schedule(bool preempt)
        if (task_on_rq_queued(prev))
                update_rq_clock(rq);
 
-       next = pick_next_task(rq, prev, cookie);
+       next = pick_next_task(rq, prev, &rf);
        clear_tsk_need_resched(prev);
        clear_preempt_need_resched();
-       rq->clock_skip_update = 0;
 
        if (likely(prev != next)) {
                rq->nr_switches++;
@@ -3400,9 +3411,12 @@ static void __sched notrace __schedule(bool preempt)
                ++*switch_count;
 
                trace_sched_switch(preempt, prev, next);
-               rq = context_switch(rq, prev, next, cookie); /* unlocks the rq */
+
+               /* Also unlocks the rq: */
+               rq = context_switch(rq, prev, next, &rf);
        } else {
-               lockdep_unpin_lock(&rq->lock, cookie);
+               rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
+               rq_unpin_lock(rq, &rf);
                raw_spin_unlock_irq(&rq->lock);
        }
 
@@ -3426,14 +3440,18 @@ void __noreturn do_task_dead(void)
        smp_mb();
        raw_spin_unlock_wait(&current->pi_lock);
 
-       /* causes final put_task_struct in finish_task_switch(). */
+       /* Causes final put_task_struct in finish_task_switch(): */
        __set_current_state(TASK_DEAD);
-       current->flags |= PF_NOFREEZE;  /* tell freezer to ignore us */
+
+       /* Tell freezer to ignore us: */
+       current->flags |= PF_NOFREEZE;
+
        __schedule(false);
        BUG();
-       /* Avoid "noreturn function does return".  */
+
+       /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
        for (;;)
-               cpu_relax();    /* For when BUG is null */
+               cpu_relax();
 }
 
 static inline void sched_submit_work(struct task_struct *tsk)
@@ -3651,6 +3669,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
        BUG_ON(prio > MAX_PRIO);
 
        rq = __task_rq_lock(p, &rf);
+       update_rq_clock(rq);
 
        /*
         * Idle task boosting is a nono in general. There is one
@@ -3725,7 +3744,8 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
        check_class_changed(rq, p, prev_class, oldprio);
 out_unlock:
-       preempt_disable(); /* avoid rq from going away on us */
+       /* Avoid rq from going away on us: */
+       preempt_disable();
        __task_rq_unlock(rq, &rf);
 
        balance_callback(rq);
@@ -3747,6 +3767,8 @@ void set_user_nice(struct task_struct *p, long nice)
         * the task might be in the middle of scheduling on another CPU.
         */
        rq = task_rq_lock(p, &rf);
+       update_rq_clock(rq);
+
        /*
         * The RT priorities are set via sched_setscheduler(), but we still
         * allow the 'normal' nice value to be set - but as expected
@@ -3793,7 +3815,7 @@ EXPORT_SYMBOL(set_user_nice);
  */
 int can_nice(const struct task_struct *p, const int nice)
 {
-       /* convert nice value [19,-20] to rlimit style value [1,40] */
+       /* Convert nice value [19,-20] to rlimit style value [1,40]: */
        int nice_rlim = nice_to_rlimit(nice);
 
        return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
@@ -3849,7 +3871,7 @@ int task_prio(const struct task_struct *p)
 }
 
 /**
- * idle_cpu - is a given cpu idle currently?
+ * idle_cpu - is a given CPU idle currently?
  * @cpu: the processor in question.
  *
  * Return: 1 if the CPU is currently idle. 0 otherwise.
@@ -3873,10 +3895,10 @@ int idle_cpu(int cpu)
 }
 
 /**
- * idle_task - return the idle task for a given cpu.
+ * idle_task - return the idle task for a given CPU.
  * @cpu: the processor in question.
  *
- * Return: The idle task for the cpu @cpu.
+ * Return: The idle task for the CPU @cpu.
  */
 struct task_struct *idle_task(int cpu)
 {
@@ -4042,7 +4064,7 @@ __checkparam_dl(const struct sched_attr *attr)
 }
 
 /*
- * check the target process has a UID that matches the current process's
+ * Check the target process has a UID that matches the current process's:
  */
 static bool check_same_owner(struct task_struct *p)
 {
@@ -4057,8 +4079,7 @@ static bool check_same_owner(struct task_struct *p)
        return match;
 }
 
-static bool dl_param_changed(struct task_struct *p,
-               const struct sched_attr *attr)
+static bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
 {
        struct sched_dl_entity *dl_se = &p->dl;
 
@@ -4085,10 +4106,10 @@ static int __sched_setscheduler(struct task_struct *p,
        int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE;
        struct rq *rq;
 
-       /* may grab non-irq protected spin_locks */
+       /* May grab non-irq protected spin_locks: */
        BUG_ON(in_interrupt());
 recheck:
-       /* double check policy once rq lock held */
+       /* Double check policy once rq lock held: */
        if (policy < 0) {
                reset_on_fork = p->sched_reset_on_fork;
                policy = oldpolicy = p->policy;
@@ -4128,11 +4149,11 @@ recheck:
                        unsigned long rlim_rtprio =
                                        task_rlimit(p, RLIMIT_RTPRIO);
 
-                       /* can't set/change the rt policy */
+                       /* Can't set/change the rt policy: */
                        if (policy != p->policy && !rlim_rtprio)
                                return -EPERM;
 
-                       /* can't increase priority */
+                       /* Can't increase priority: */
                        if (attr->sched_priority > p->rt_priority &&
                            attr->sched_priority > rlim_rtprio)
                                return -EPERM;
@@ -4156,11 +4177,11 @@ recheck:
                                return -EPERM;
                }
 
-               /* can't change other user's priorities */
+               /* Can't change other user's priorities: */
                if (!check_same_owner(p))
                        return -EPERM;
 
-               /* Normal users shall not reset the sched_reset_on_fork flag */
+               /* Normal users shall not reset the sched_reset_on_fork flag: */
                if (p->sched_reset_on_fork && !reset_on_fork)
                        return -EPERM;
        }
@@ -4172,16 +4193,17 @@ recheck:
        }
 
        /*
-        * make sure no PI-waiters arrive (or leave) while we are
+        * Make sure no PI-waiters arrive (or leave) while we are
         * changing the priority of the task:
         *
         * To be able to change p->policy safely, the appropriate
         * runqueue lock must be held.
         */
        rq = task_rq_lock(p, &rf);
+       update_rq_clock(rq);
 
        /*
-        * Changing the policy of the stop threads its a very bad idea
+        * Changing the policy of the stop threads its a very bad idea:
         */
        if (p == rq->stop) {
                task_rq_unlock(rq, p, &rf);
@@ -4237,7 +4259,7 @@ change:
 #endif
        }
 
-       /* recheck policy now with rq lock held */
+       /* Re-check policy now with rq lock held: */
        if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
                policy = oldpolicy = -1;
                task_rq_unlock(rq, p, &rf);
@@ -4294,15 +4316,15 @@ change:
                set_curr_task(rq, p);
 
        check_class_changed(rq, p, prev_class, oldprio);
-       preempt_disable(); /* avoid rq from going away on us */
+
+       /* Avoid rq from going away on us: */
+       preempt_disable();
        task_rq_unlock(rq, p, &rf);
 
        if (pi)
                rt_mutex_adjust_pi(p);
 
-       /*
-        * Run balance callbacks after we've adjusted the PI chain.
-        */
+       /* Run balance callbacks after we've adjusted the PI chain: */
        balance_callback(rq);
        preempt_enable();
 
@@ -4395,8 +4417,7 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
 /*
  * Mimics kernel/events/core.c perf_copy_attr().
  */
-static int sched_copy_attr(struct sched_attr __user *uattr,
-                          struct sched_attr *attr)
+static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
 {
        u32 size;
        int ret;
@@ -4404,19 +4425,19 @@ static int sched_copy_attr(struct sched_attr __user *uattr,
        if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0))
                return -EFAULT;
 
-       /*
-        * zero the full structure, so that a short copy will be nice.
-        */
+       /* Zero the full structure, so that a short copy will be nice: */
        memset(attr, 0, sizeof(*attr));
 
        ret = get_user(size, &uattr->size);
        if (ret)
                return ret;
 
-       if (size > PAGE_SIZE)   /* silly large */
+       /* Bail out on silly large: */
+       if (size > PAGE_SIZE)
                goto err_size;
 
-       if (!size)              /* abi compat */
+       /* ABI compatibility quirk: */
+       if (!size)
                size = SCHED_ATTR_SIZE_VER0;
 
        if (size < SCHED_ATTR_SIZE_VER0)
@@ -4451,7 +4472,7 @@ static int sched_copy_attr(struct sched_attr __user *uattr,
                return -EFAULT;
 
        /*
-        * XXX: do we want to be lenient like existing syscalls; or do we want
+        * XXX: Do we want to be lenient like existing syscalls; or do we want
         * to be strict and return an error on out-of-bounds values?
         */
        attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
@@ -4471,10 +4492,8 @@ err_size:
  *
  * Return: 0 on success. An error code otherwise.
  */
-SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
-               struct sched_param __user *, param)
+SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
 {
-       /* negative values for policy are not valid */
        if (policy < 0)
                return -EINVAL;
 
@@ -4784,10 +4803,10 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
 }
 
 /**
- * sys_sched_setaffinity - set the cpu affinity of a process
+ * sys_sched_setaffinity - set the CPU affinity of a process
  * @pid: pid of the process
  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
- * @user_mask_ptr: user-space pointer to the new cpu mask
+ * @user_mask_ptr: user-space pointer to the new CPU mask
  *
  * Return: 0 on success. An error code otherwise.
  */
@@ -4835,10 +4854,10 @@ out_unlock:
 }
 
 /**
- * sys_sched_getaffinity - get the cpu affinity of a process
+ * sys_sched_getaffinity - get the CPU affinity of a process
  * @pid: pid of the process
  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
- * @user_mask_ptr: user-space pointer to hold the current cpu mask
+ * @user_mask_ptr: user-space pointer to hold the current CPU mask
  *
  * Return: size of CPU mask copied to user_mask_ptr on success. An
  * error code otherwise.
@@ -4966,7 +4985,7 @@ EXPORT_SYMBOL(__cond_resched_softirq);
  * Typical broken usage is:
  *
  * while (!event)
- *     yield();
+ *     yield();
  *
  * where one assumes that yield() will let 'the other' process run that will
  * make event true. If the current task is a SCHED_FIFO task that will never
@@ -5057,31 +5076,48 @@ out_irq:
 }
 EXPORT_SYMBOL_GPL(yield_to);
 
+int io_schedule_prepare(void)
+{
+       int old_iowait = current->in_iowait;
+
+       current->in_iowait = 1;
+       blk_schedule_flush_plug(current);
+
+       return old_iowait;
+}
+
+void io_schedule_finish(int token)
+{
+       current->in_iowait = token;
+}
+
 /*
  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  * that process accounting knows that this is a task in IO wait state.
  */
 long __sched io_schedule_timeout(long timeout)
 {
-       int old_iowait = current->in_iowait;
-       struct rq *rq;
+       int token;
        long ret;
 
-       current->in_iowait = 1;
-       blk_schedule_flush_plug(current);
-
-       delayacct_blkio_start();
-       rq = raw_rq();
-       atomic_inc(&rq->nr_iowait);
+       token = io_schedule_prepare();
        ret = schedule_timeout(timeout);
-       current->in_iowait = old_iowait;
-       atomic_dec(&rq->nr_iowait);
-       delayacct_blkio_end();
+       io_schedule_finish(token);
 
        return ret;
 }
 EXPORT_SYMBOL(io_schedule_timeout);
 
+void io_schedule(void)
+{
+       int token;
+
+       token = io_schedule_prepare();
+       schedule();
+       io_schedule_finish(token);
+}
+EXPORT_SYMBOL(io_schedule);
+
 /**
  * sys_sched_get_priority_max - return maximum RT priority.
  * @policy: scheduling class.
@@ -5264,7 +5300,7 @@ void init_idle_bootup_task(struct task_struct *idle)
 /**
  * init_idle - set up an idle thread for a given CPU
  * @idle: task in question
- * @cpu: cpu the idle task belongs to
+ * @cpu: CPU the idle task belongs to
  *
  * NOTE: this function does not set the idle thread's NEED_RESCHED
  * flag, to make booting more robust.
@@ -5295,7 +5331,7 @@ void init_idle(struct task_struct *idle, int cpu)
 #endif
        /*
         * We're having a chicken and egg problem, even though we are
-        * holding rq->lock, the cpu isn't yet set to this cpu so the
+        * holding rq->lock, the CPU isn't yet set to this CPU so the
         * lockdep check in task_group() will fail.
         *
         * Similar case to sched_fork(). / Alternatively we could
@@ -5360,7 +5396,7 @@ int task_can_attach(struct task_struct *p,
 
        /*
         * Kthreads which disallow setaffinity shouldn't be moved
-        * to a new cpuset; we don't want to change their cpu
+        * to a new cpuset; we don't want to change their CPU
         * affinity and isolating such threads by their set of
         * allowed nodes is unnecessary.  Thus, cpusets are not
         * applicable for such threads.  This prevents checking for
@@ -5409,7 +5445,7 @@ out:
 
 #ifdef CONFIG_SMP
 
-static bool sched_smp_initialized __read_mostly;
+bool sched_smp_initialized __read_mostly;
 
 #ifdef CONFIG_NUMA_BALANCING
 /* Migrate current task p to target_cpu */
@@ -5461,7 +5497,7 @@ void sched_setnuma(struct task_struct *p, int nid)
 
 #ifdef CONFIG_HOTPLUG_CPU
 /*
- * Ensures that the idle task is using init_mm right before its cpu goes
+ * Ensure that the idle task is using init_mm right before its CPU goes
  * offline.
  */
 void idle_task_exit(void)
@@ -5521,7 +5557,7 @@ static void migrate_tasks(struct rq *dead_rq)
 {
        struct rq *rq = dead_rq;
        struct task_struct *next, *stop = rq->stop;
-       struct pin_cookie cookie;
+       struct rq_flags rf, old_rf;
        int dest_cpu;
 
        /*
@@ -5545,16 +5581,16 @@ static void migrate_tasks(struct rq *dead_rq)
        for (;;) {
                /*
                 * There's this thread running, bail when that's the only
-                * remaining thread.
+                * remaining thread:
                 */
                if (rq->nr_running == 1)
                        break;
 
                /*
-                * pick_next_task assumes pinned rq->lock.
+                * pick_next_task() assumes pinned rq->lock:
                 */
-               cookie = lockdep_pin_lock(&rq->lock);
-               next = pick_next_task(rq, &fake_task, cookie);
+               rq_pin_lock(rq, &rf);
+               next = pick_next_task(rq, &fake_task, &rf);
                BUG_ON(!next);
                next->sched_class->put_prev_task(rq, next);
 
@@ -5567,7 +5603,7 @@ static void migrate_tasks(struct rq *dead_rq)
                 * because !cpu_active at this point, which means load-balance
                 * will not interfere. Also, stop-machine.
                 */
-               lockdep_unpin_lock(&rq->lock, cookie);
+               rq_unpin_lock(rq, &rf);
                raw_spin_unlock(&rq->lock);
                raw_spin_lock(&next->pi_lock);
                raw_spin_lock(&rq->lock);
@@ -5582,6 +5618,13 @@ static void migrate_tasks(struct rq *dead_rq)
                        continue;
                }
 
+               /*
+                * __migrate_task() may return with a different
+                * rq->lock held and a new cookie in 'rf', but we need
+                * to preserve rf::clock_update_flags for 'dead_rq'.
+                */
+               old_rf = rf;
+
                /* Find suitable destination for @next, with force if needed. */
                dest_cpu = select_fallback_rq(dead_rq->cpu, next);
 
@@ -5590,6 +5633,7 @@ static void migrate_tasks(struct rq *dead_rq)
                        raw_spin_unlock(&rq->lock);
                        rq = dead_rq;
                        raw_spin_lock(&rq->lock);
+                       rf = old_rf;
                }
                raw_spin_unlock(&next->pi_lock);
        }
@@ -5598,7 +5642,7 @@ static void migrate_tasks(struct rq *dead_rq)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static void set_rq_online(struct rq *rq)
+void set_rq_online(struct rq *rq)
 {
        if (!rq->online) {
                const struct sched_class *class;
@@ -5613,7 +5657,7 @@ static void set_rq_online(struct rq *rq)
        }
 }
 
-static void set_rq_offline(struct rq *rq)
+void set_rq_offline(struct rq *rq)
 {
        if (rq->online) {
                const struct sched_class *class;
@@ -5635,1657 +5679,20 @@ static void set_cpu_rq_start_time(unsigned int cpu)
        rq->age_stamp = sched_clock_cpu(cpu);
 }
 
-static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
-
-#ifdef CONFIG_SCHED_DEBUG
-
-static __read_mostly int sched_debug_enabled;
-
-static int __init sched_debug_setup(char *str)
-{
-       sched_debug_enabled = 1;
-
-       return 0;
-}
-early_param("sched_debug", sched_debug_setup);
+/*
+ * used to mark begin/end of suspend/resume:
+ */
+static int num_cpus_frozen;
 
-static inline bool sched_debug(void)
-{
-       return sched_debug_enabled;
-}
-
-static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
-                                 struct cpumask *groupmask)
-{
-       struct sched_group *group = sd->groups;
-
-       cpumask_clear(groupmask);
-
-       printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
-
-       if (!(sd->flags & SD_LOAD_BALANCE)) {
-               printk("does not load-balance\n");
-               if (sd->parent)
-                       printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
-                                       " has parent");
-               return -1;
-       }
-
-       printk(KERN_CONT "span %*pbl level %s\n",
-              cpumask_pr_args(sched_domain_span(sd)), sd->name);
-
-       if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
-               printk(KERN_ERR "ERROR: domain->span does not contain "
-                               "CPU%d\n", cpu);
-       }
-       if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
-               printk(KERN_ERR "ERROR: domain->groups does not contain"
-                               " CPU%d\n", cpu);
-       }
-
-       printk(KERN_DEBUG "%*s groups:", level + 1, "");
-       do {
-               if (!group) {
-                       printk("\n");
-                       printk(KERN_ERR "ERROR: group is NULL\n");
-                       break;
-               }
-
-               if (!cpumask_weight(sched_group_cpus(group))) {
-                       printk(KERN_CONT "\n");
-                       printk(KERN_ERR "ERROR: empty group\n");
-                       break;
-               }
-
-               if (!(sd->flags & SD_OVERLAP) &&
-                   cpumask_intersects(groupmask, sched_group_cpus(group))) {
-                       printk(KERN_CONT "\n");
-                       printk(KERN_ERR "ERROR: repeated CPUs\n");
-                       break;
-               }
-
-               cpumask_or(groupmask, groupmask, sched_group_cpus(group));
-
-               printk(KERN_CONT " %*pbl",
-                      cpumask_pr_args(sched_group_cpus(group)));
-               if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
-                       printk(KERN_CONT " (cpu_capacity = %lu)",
-                               group->sgc->capacity);
-               }
-
-               group = group->next;
-       } while (group != sd->groups);
-       printk(KERN_CONT "\n");
-
-       if (!cpumask_equal(sched_domain_span(sd), groupmask))
-               printk(KERN_ERR "ERROR: groups don't span domain->span\n");
-
-       if (sd->parent &&
-           !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
-               printk(KERN_ERR "ERROR: parent span is not a superset "
-                       "of domain->span\n");
-       return 0;
-}
-
-static void sched_domain_debug(struct sched_domain *sd, int cpu)
-{
-       int level = 0;
-
-       if (!sched_debug_enabled)
-               return;
-
-       if (!sd) {
-               printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
-               return;
-       }
-
-       printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
-
-       for (;;) {
-               if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
-                       break;
-               level++;
-               sd = sd->parent;
-               if (!sd)
-                       break;
-       }
-}
-#else /* !CONFIG_SCHED_DEBUG */
-
-# define sched_debug_enabled 0
-# define sched_domain_debug(sd, cpu) do { } while (0)
-static inline bool sched_debug(void)
-{
-       return false;
-}
-#endif /* CONFIG_SCHED_DEBUG */
-
-static int sd_degenerate(struct sched_domain *sd)
-{
-       if (cpumask_weight(sched_domain_span(sd)) == 1)
-               return 1;
-
-       /* Following flags need at least 2 groups */
-       if (sd->flags & (SD_LOAD_BALANCE |
-                        SD_BALANCE_NEWIDLE |
-                        SD_BALANCE_FORK |
-                        SD_BALANCE_EXEC |
-                        SD_SHARE_CPUCAPACITY |
-                        SD_ASYM_CPUCAPACITY |
-                        SD_SHARE_PKG_RESOURCES |
-                        SD_SHARE_POWERDOMAIN)) {
-               if (sd->groups != sd->groups->next)
-                       return 0;
-       }
-
-       /* Following flags don't use groups */
-       if (sd->flags & (SD_WAKE_AFFINE))
-               return 0;
-
-       return 1;
-}
-
-static int
-sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
-{
-       unsigned long cflags = sd->flags, pflags = parent->flags;
-
-       if (sd_degenerate(parent))
-               return 1;
-
-       if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
-               return 0;
-
-       /* Flags needing groups don't count if only 1 group in parent */
-       if (parent->groups == parent->groups->next) {
-               pflags &= ~(SD_LOAD_BALANCE |
-                               SD_BALANCE_NEWIDLE |
-                               SD_BALANCE_FORK |
-                               SD_BALANCE_EXEC |
-                               SD_ASYM_CPUCAPACITY |
-                               SD_SHARE_CPUCAPACITY |
-                               SD_SHARE_PKG_RESOURCES |
-                               SD_PREFER_SIBLING |
-                               SD_SHARE_POWERDOMAIN);
-               if (nr_node_ids == 1)
-                       pflags &= ~SD_SERIALIZE;
-       }
-       if (~cflags & pflags)
-               return 0;
-
-       return 1;
-}
-
-static void free_rootdomain(struct rcu_head *rcu)
-{
-       struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
-
-       cpupri_cleanup(&rd->cpupri);
-       cpudl_cleanup(&rd->cpudl);
-       free_cpumask_var(rd->dlo_mask);
-       free_cpumask_var(rd->rto_mask);
-       free_cpumask_var(rd->online);
-       free_cpumask_var(rd->span);
-       kfree(rd);
-}
-
-static void rq_attach_root(struct rq *rq, struct root_domain *rd)
-{
-       struct root_domain *old_rd = NULL;
-       unsigned long flags;
-
-       raw_spin_lock_irqsave(&rq->lock, flags);
-
-       if (rq->rd) {
-               old_rd = rq->rd;
-
-               if (cpumask_test_cpu(rq->cpu, old_rd->online))
-                       set_rq_offline(rq);
-
-               cpumask_clear_cpu(rq->cpu, old_rd->span);
-
-               /*
-                * If we dont want to free the old_rd yet then
-                * set old_rd to NULL to skip the freeing later
-                * in this function:
-                */
-               if (!atomic_dec_and_test(&old_rd->refcount))
-                       old_rd = NULL;
-       }
-
-       atomic_inc(&rd->refcount);
-       rq->rd = rd;
-
-       cpumask_set_cpu(rq->cpu, rd->span);
-       if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
-               set_rq_online(rq);
-
-       raw_spin_unlock_irqrestore(&rq->lock, flags);
-
-       if (old_rd)
-               call_rcu_sched(&old_rd->rcu, free_rootdomain);
-}
-
-static int init_rootdomain(struct root_domain *rd)
-{
-       memset(rd, 0, sizeof(*rd));
-
-       if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
-               goto out;
-       if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
-               goto free_span;
-       if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
-               goto free_online;
-       if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
-               goto free_dlo_mask;
-
-       init_dl_bw(&rd->dl_bw);
-       if (cpudl_init(&rd->cpudl) != 0)
-               goto free_dlo_mask;
-
-       if (cpupri_init(&rd->cpupri) != 0)
-               goto free_rto_mask;
-       return 0;
-
-free_rto_mask:
-       free_cpumask_var(rd->rto_mask);
-free_dlo_mask:
-       free_cpumask_var(rd->dlo_mask);
-free_online:
-       free_cpumask_var(rd->online);
-free_span:
-       free_cpumask_var(rd->span);
-out:
-       return -ENOMEM;
-}
-
-/*
- * By default the system creates a single root-domain with all cpus as
- * members (mimicking the global state we have today).
- */
-struct root_domain def_root_domain;
-
-static void init_defrootdomain(void)
-{
-       init_rootdomain(&def_root_domain);
-
-       atomic_set(&def_root_domain.refcount, 1);
-}
-
-static struct root_domain *alloc_rootdomain(void)
-{
-       struct root_domain *rd;
-
-       rd = kmalloc(sizeof(*rd), GFP_KERNEL);
-       if (!rd)
-               return NULL;
-
-       if (init_rootdomain(rd) != 0) {
-               kfree(rd);
-               return NULL;
-       }
-
-       return rd;
-}
-
-static void free_sched_groups(struct sched_group *sg, int free_sgc)
-{
-       struct sched_group *tmp, *first;
-
-       if (!sg)
-               return;
-
-       first = sg;
-       do {
-               tmp = sg->next;
-
-               if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
-                       kfree(sg->sgc);
-
-               kfree(sg);
-               sg = tmp;
-       } while (sg != first);
-}
-
-static void destroy_sched_domain(struct sched_domain *sd)
-{
-       /*
-        * If its an overlapping domain it has private groups, iterate and
-        * nuke them all.
-        */
-       if (sd->flags & SD_OVERLAP) {
-               free_sched_groups(sd->groups, 1);
-       } else if (atomic_dec_and_test(&sd->groups->ref)) {
-               kfree(sd->groups->sgc);
-               kfree(sd->groups);
-       }
-       if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
-               kfree(sd->shared);
-       kfree(sd);
-}
-
-static void destroy_sched_domains_rcu(struct rcu_head *rcu)
-{
-       struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
-
-       while (sd) {
-               struct sched_domain *parent = sd->parent;
-               destroy_sched_domain(sd);
-               sd = parent;
-       }
-}
-
-static void destroy_sched_domains(struct sched_domain *sd)
-{
-       if (sd)
-               call_rcu(&sd->rcu, destroy_sched_domains_rcu);
-}
-
-/*
- * Keep a special pointer to the highest sched_domain that has
- * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this
- * allows us to avoid some pointer chasing select_idle_sibling().
- *
- * Also keep a unique ID per domain (we use the first cpu number in
- * the cpumask of the domain), this allows us to quickly tell if
- * two cpus are in the same cache domain, see cpus_share_cache().
- */
-DEFINE_PER_CPU(struct sched_domain *, sd_llc);
-DEFINE_PER_CPU(int, sd_llc_size);
-DEFINE_PER_CPU(int, sd_llc_id);
-DEFINE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
-DEFINE_PER_CPU(struct sched_domain *, sd_numa);
-DEFINE_PER_CPU(struct sched_domain *, sd_asym);
-
-static void update_top_cache_domain(int cpu)
-{
-       struct sched_domain_shared *sds = NULL;
-       struct sched_domain *sd;
-       int id = cpu;
-       int size = 1;
-
-       sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
-       if (sd) {
-               id = cpumask_first(sched_domain_span(sd));
-               size = cpumask_weight(sched_domain_span(sd));
-               sds = sd->shared;
-       }
-
-       rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
-       per_cpu(sd_llc_size, cpu) = size;
-       per_cpu(sd_llc_id, cpu) = id;
-       rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);
-
-       sd = lowest_flag_domain(cpu, SD_NUMA);
-       rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
-
-       sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
-       rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
-}
-
-/*
- * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
- * hold the hotplug lock.
- */
-static void
-cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
-{
-       struct rq *rq = cpu_rq(cpu);
-       struct sched_domain *tmp;
-
-       /* Remove the sched domains which do not contribute to scheduling. */
-       for (tmp = sd; tmp; ) {
-               struct sched_domain *parent = tmp->parent;
-               if (!parent)
-                       break;
-
-               if (sd_parent_degenerate(tmp, parent)) {
-                       tmp->parent = parent->parent;
-                       if (parent->parent)
-                               parent->parent->child = tmp;
-                       /*
-                        * Transfer SD_PREFER_SIBLING down in case of a
-                        * degenerate parent; the spans match for this
-                        * so the property transfers.
-                        */
-                       if (parent->flags & SD_PREFER_SIBLING)
-                               tmp->flags |= SD_PREFER_SIBLING;
-                       destroy_sched_domain(parent);
-               } else
-                       tmp = tmp->parent;
-       }
-
-       if (sd && sd_degenerate(sd)) {
-               tmp = sd;
-               sd = sd->parent;
-               destroy_sched_domain(tmp);
-               if (sd)
-                       sd->child = NULL;
-       }
-
-       sched_domain_debug(sd, cpu);
-
-       rq_attach_root(rq, rd);
-       tmp = rq->sd;
-       rcu_assign_pointer(rq->sd, sd);
-       destroy_sched_domains(tmp);
-
-       update_top_cache_domain(cpu);
-}
-
-/* Setup the mask of cpus configured for isolated domains */
-static int __init isolated_cpu_setup(char *str)
-{
-       int ret;
-
-       alloc_bootmem_cpumask_var(&cpu_isolated_map);
-       ret = cpulist_parse(str, cpu_isolated_map);
-       if (ret) {
-               pr_err("sched: Error, all isolcpus= values must be between 0 and %d\n", nr_cpu_ids);
-               return 0;
-       }
-       return 1;
-}
-__setup("isolcpus=", isolated_cpu_setup);
-
-struct s_data {
-       struct sched_domain ** __percpu sd;
-       struct root_domain      *rd;
-};
-
-enum s_alloc {
-       sa_rootdomain,
-       sa_sd,
-       sa_sd_storage,
-       sa_none,
-};
-
-/*
- * Build an iteration mask that can exclude certain CPUs from the upwards
- * domain traversal.
- *
- * Asymmetric node setups can result in situations where the domain tree is of
- * unequal depth, make sure to skip domains that already cover the entire
- * range.
- *
- * In that case build_sched_domains() will have terminated the iteration early
- * and our sibling sd spans will be empty. Domains should always include the
- * cpu they're built on, so check that.
- *
- */
-static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
-{
-       const struct cpumask *span = sched_domain_span(sd);
-       struct sd_data *sdd = sd->private;
-       struct sched_domain *sibling;
-       int i;
-
-       for_each_cpu(i, span) {
-               sibling = *per_cpu_ptr(sdd->sd, i);
-               if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
-                       continue;
-
-               cpumask_set_cpu(i, sched_group_mask(sg));
-       }
-}
-
-/*
- * Return the canonical balance cpu for this group, this is the first cpu
- * of this group that's also in the iteration mask.
- */
-int group_balance_cpu(struct sched_group *sg)
-{
-       return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
-}
-
-static int
-build_overlap_sched_groups(struct sched_domain *sd, int cpu)
-{
-       struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
-       const struct cpumask *span = sched_domain_span(sd);
-       struct cpumask *covered = sched_domains_tmpmask;
-       struct sd_data *sdd = sd->private;
-       struct sched_domain *sibling;
-       int i;
-
-       cpumask_clear(covered);
-
-       for_each_cpu(i, span) {
-               struct cpumask *sg_span;
-
-               if (cpumask_test_cpu(i, covered))
-                       continue;
-
-               sibling = *per_cpu_ptr(sdd->sd, i);
-
-               /* See the comment near build_group_mask(). */
-               if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
-                       continue;
-
-               sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
-                               GFP_KERNEL, cpu_to_node(cpu));
-
-               if (!sg)
-                       goto fail;
-
-               sg_span = sched_group_cpus(sg);
-               if (sibling->child)
-                       cpumask_copy(sg_span, sched_domain_span(sibling->child));
-               else
-                       cpumask_set_cpu(i, sg_span);
-
-               cpumask_or(covered, covered, sg_span);
-
-               sg->sgc = *per_cpu_ptr(sdd->sgc, i);
-               if (atomic_inc_return(&sg->sgc->ref) == 1)
-                       build_group_mask(sd, sg);
-
-               /*
-                * Initialize sgc->capacity such that even if we mess up the
-                * domains and no possible iteration will get us here, we won't
-                * die on a /0 trap.
-                */
-               sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
-               sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
-
-               /*
-                * Make sure the first group of this domain contains the
-                * canonical balance cpu. Otherwise the sched_domain iteration
-                * breaks. See update_sg_lb_stats().
-                */
-               if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
-                   group_balance_cpu(sg) == cpu)
-                       groups = sg;
-
-               if (!first)
-                       first = sg;
-               if (last)
-                       last->next = sg;
-               last = sg;
-               last->next = first;
-       }
-       sd->groups = groups;
-
-       return 0;
-
-fail:
-       free_sched_groups(first, 0);
-
-       return -ENOMEM;
-}
-
-static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
-{
-       struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
-       struct sched_domain *child = sd->child;
-
-       if (child)
-               cpu = cpumask_first(sched_domain_span(child));
-
-       if (sg) {
-               *sg = *per_cpu_ptr(sdd->sg, cpu);
-               (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu);
-               atomic_set(&(*sg)->sgc->ref, 1); /* for claim_allocations */
-       }
-
-       return cpu;
-}
-
-/*
- * build_sched_groups will build a circular linked list of the groups
- * covered by the given span, and will set each group's ->cpumask correctly,
- * and ->cpu_capacity to 0.
- *
- * Assumes the sched_domain tree is fully constructed
- */
-static int
-build_sched_groups(struct sched_domain *sd, int cpu)
-{
-       struct sched_group *first = NULL, *last = NULL;
-       struct sd_data *sdd = sd->private;
-       const struct cpumask *span = sched_domain_span(sd);
-       struct cpumask *covered;
-       int i;
-
-       get_group(cpu, sdd, &sd->groups);
-       atomic_inc(&sd->groups->ref);
-
-       if (cpu != cpumask_first(span))
-               return 0;
-
-       lockdep_assert_held(&sched_domains_mutex);
-       covered = sched_domains_tmpmask;
-
-       cpumask_clear(covered);
-
-       for_each_cpu(i, span) {
-               struct sched_group *sg;
-               int group, j;
-
-               if (cpumask_test_cpu(i, covered))
-                       continue;
-
-               group = get_group(i, sdd, &sg);
-               cpumask_setall(sched_group_mask(sg));
-
-               for_each_cpu(j, span) {
-                       if (get_group(j, sdd, NULL) != group)
-                               continue;
-
-                       cpumask_set_cpu(j, covered);
-                       cpumask_set_cpu(j, sched_group_cpus(sg));
-               }
-
-               if (!first)
-                       first = sg;
-               if (last)
-                       last->next = sg;
-               last = sg;
-       }
-       last->next = first;
-
-       return 0;
-}
-
-/*
- * Initialize sched groups cpu_capacity.
- *
- * cpu_capacity indicates the capacity of sched group, which is used while
- * distributing the load between different sched groups in a sched domain.
- * Typically cpu_capacity for all the groups in a sched domain will be same
- * unless there are asymmetries in the topology. If there are asymmetries,
- * group having more cpu_capacity will pickup more load compared to the
- * group having less cpu_capacity.
- */
-static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
-{
-       struct sched_group *sg = sd->groups;
-
-       WARN_ON(!sg);
-
-       do {
-               int cpu, max_cpu = -1;
-
-               sg->group_weight = cpumask_weight(sched_group_cpus(sg));
-
-               if (!(sd->flags & SD_ASYM_PACKING))
-                       goto next;
-
-               for_each_cpu(cpu, sched_group_cpus(sg)) {
-                       if (max_cpu < 0)
-                               max_cpu = cpu;
-                       else if (sched_asym_prefer(cpu, max_cpu))
-                               max_cpu = cpu;
-               }
-               sg->asym_prefer_cpu = max_cpu;
-
-next:
-               sg = sg->next;
-       } while (sg != sd->groups);
-
-       if (cpu != group_balance_cpu(sg))
-               return;
-
-       update_group_capacity(sd, cpu);
-}
-
-/*
- * Initializers for schedule domains
- * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
- */
-
-static int default_relax_domain_level = -1;
-int sched_domain_level_max;
-
-static int __init setup_relax_domain_level(char *str)
-{
-       if (kstrtoint(str, 0, &default_relax_domain_level))
-               pr_warn("Unable to set relax_domain_level\n");
-
-       return 1;
-}
-__setup("relax_domain_level=", setup_relax_domain_level);
-
-static void set_domain_attribute(struct sched_domain *sd,
-                                struct sched_domain_attr *attr)
-{
-       int request;
-
-       if (!attr || attr->relax_domain_level < 0) {
-               if (default_relax_domain_level < 0)
-                       return;
-               else
-                       request = default_relax_domain_level;
-       } else
-               request = attr->relax_domain_level;
-       if (request < sd->level) {
-               /* turn off idle balance on this domain */
-               sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
-       } else {
-               /* turn on idle balance on this domain */
-               sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
-       }
-}
-
-static void __sdt_free(const struct cpumask *cpu_map);
-static int __sdt_alloc(const struct cpumask *cpu_map);
-
-static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
-                                const struct cpumask *cpu_map)
-{
-       switch (what) {
-       case sa_rootdomain:
-               if (!atomic_read(&d->rd->refcount))
-                       free_rootdomain(&d->rd->rcu); /* fall through */
-       case sa_sd:
-               free_percpu(d->sd); /* fall through */
-       case sa_sd_storage:
-               __sdt_free(cpu_map); /* fall through */
-       case sa_none:
-               break;
-       }
-}
-
-static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
-                                                  const struct cpumask *cpu_map)
-{
-       memset(d, 0, sizeof(*d));
-
-       if (__sdt_alloc(cpu_map))
-               return sa_sd_storage;
-       d->sd = alloc_percpu(struct sched_domain *);
-       if (!d->sd)
-               return sa_sd_storage;
-       d->rd = alloc_rootdomain();
-       if (!d->rd)
-               return sa_sd;
-       return sa_rootdomain;
-}
-
-/*
- * NULL the sd_data elements we've used to build the sched_domain and
- * sched_group structure so that the subsequent __free_domain_allocs()
- * will not free the data we're using.
- */
-static void claim_allocations(int cpu, struct sched_domain *sd)
-{
-       struct sd_data *sdd = sd->private;
-
-       WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
-       *per_cpu_ptr(sdd->sd, cpu) = NULL;
-
-       if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))
-               *per_cpu_ptr(sdd->sds, cpu) = NULL;
-
-       if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
-               *per_cpu_ptr(sdd->sg, cpu) = NULL;
-
-       if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
-               *per_cpu_ptr(sdd->sgc, cpu) = NULL;
-}
-
-#ifdef CONFIG_NUMA
-static int sched_domains_numa_levels;
-enum numa_topology_type sched_numa_topology_type;
-static int *sched_domains_numa_distance;
-int sched_max_numa_distance;
-static struct cpumask ***sched_domains_numa_masks;
-static int sched_domains_curr_level;
-#endif
-
-/*
- * SD_flags allowed in topology descriptions.
- *
- * These flags are purely descriptive of the topology and do not prescribe
- * behaviour. Behaviour is artificial and mapped in the below sd_init()
- * function:
- *
- *   SD_SHARE_CPUCAPACITY   - describes SMT topologies
- *   SD_SHARE_PKG_RESOURCES - describes shared caches
- *   SD_NUMA                - describes NUMA topologies
- *   SD_SHARE_POWERDOMAIN   - describes shared power domain
- *   SD_ASYM_CPUCAPACITY    - describes mixed capacity topologies
- *
- * Odd one out, which beside describing the topology has a quirk also
- * prescribes the desired behaviour that goes along with it:
- *
- *   SD_ASYM_PACKING        - describes SMT quirks
- */
-#define TOPOLOGY_SD_FLAGS              \
-       (SD_SHARE_CPUCAPACITY |         \
-        SD_SHARE_PKG_RESOURCES |       \
-        SD_NUMA |                      \
-        SD_ASYM_PACKING |              \
-        SD_ASYM_CPUCAPACITY |          \
-        SD_SHARE_POWERDOMAIN)
-
-static struct sched_domain *
-sd_init(struct sched_domain_topology_level *tl,
-       const struct cpumask *cpu_map,
-       struct sched_domain *child, int cpu)
-{
-       struct sd_data *sdd = &tl->data;
-       struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
-       int sd_id, sd_weight, sd_flags = 0;
-
-#ifdef CONFIG_NUMA
-       /*
-        * Ugly hack to pass state to sd_numa_mask()...
-        */
-       sched_domains_curr_level = tl->numa_level;
-#endif
-
-       sd_weight = cpumask_weight(tl->mask(cpu));
-
-       if (tl->sd_flags)
-               sd_flags = (*tl->sd_flags)();
-       if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
-                       "wrong sd_flags in topology description\n"))
-               sd_flags &= ~TOPOLOGY_SD_FLAGS;
-
-       *sd = (struct sched_domain){
-               .min_interval           = sd_weight,
-               .max_interval           = 2*sd_weight,
-               .busy_factor            = 32,
-               .imbalance_pct          = 125,
-
-               .cache_nice_tries       = 0,
-               .busy_idx               = 0,
-               .idle_idx               = 0,
-               .newidle_idx            = 0,
-               .wake_idx               = 0,
-               .forkexec_idx           = 0,
-
-               .flags                  = 1*SD_LOAD_BALANCE
-                                       | 1*SD_BALANCE_NEWIDLE
-                                       | 1*SD_BALANCE_EXEC
-                                       | 1*SD_BALANCE_FORK
-                                       | 0*SD_BALANCE_WAKE
-                                       | 1*SD_WAKE_AFFINE
-                                       | 0*SD_SHARE_CPUCAPACITY
-                                       | 0*SD_SHARE_PKG_RESOURCES
-                                       | 0*SD_SERIALIZE
-                                       | 0*SD_PREFER_SIBLING
-                                       | 0*SD_NUMA
-                                       | sd_flags
-                                       ,
-
-               .last_balance           = jiffies,
-               .balance_interval       = sd_weight,
-               .smt_gain               = 0,
-               .max_newidle_lb_cost    = 0,
-               .next_decay_max_lb_cost = jiffies,
-               .child                  = child,
-#ifdef CONFIG_SCHED_DEBUG
-               .name                   = tl->name,
-#endif
-       };
-
-       cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
-       sd_id = cpumask_first(sched_domain_span(sd));
-
-       /*
-        * Convert topological properties into behaviour.
-        */
-
-       if (sd->flags & SD_ASYM_CPUCAPACITY) {
-               struct sched_domain *t = sd;
-
-               for_each_lower_domain(t)
-                       t->flags |= SD_BALANCE_WAKE;
-       }
-
-       if (sd->flags & SD_SHARE_CPUCAPACITY) {
-               sd->flags |= SD_PREFER_SIBLING;
-               sd->imbalance_pct = 110;
-               sd->smt_gain = 1178; /* ~15% */
-
-       } else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
-               sd->imbalance_pct = 117;
-               sd->cache_nice_tries = 1;
-               sd->busy_idx = 2;
-
-#ifdef CONFIG_NUMA
-       } else if (sd->flags & SD_NUMA) {
-               sd->cache_nice_tries = 2;
-               sd->busy_idx = 3;
-               sd->idle_idx = 2;
-
-               sd->flags |= SD_SERIALIZE;
-               if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) {
-                       sd->flags &= ~(SD_BALANCE_EXEC |
-                                      SD_BALANCE_FORK |
-                                      SD_WAKE_AFFINE);
-               }
-
-#endif
-       } else {
-               sd->flags |= SD_PREFER_SIBLING;
-               sd->cache_nice_tries = 1;
-               sd->busy_idx = 2;
-               sd->idle_idx = 1;
-       }
-
-       /*
-        * For all levels sharing cache; connect a sched_domain_shared
-        * instance.
-        */
-       if (sd->flags & SD_SHARE_PKG_RESOURCES) {
-               sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
-               atomic_inc(&sd->shared->ref);
-               atomic_set(&sd->shared->nr_busy_cpus, sd_weight);
-       }
-
-       sd->private = sdd;
-
-       return sd;
-}
-
-/*
- * Topology list, bottom-up.
- */
-static struct sched_domain_topology_level default_topology[] = {
-#ifdef CONFIG_SCHED_SMT
-       { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
-#endif
-#ifdef CONFIG_SCHED_MC
-       { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
-#endif
-       { cpu_cpu_mask, SD_INIT_NAME(DIE) },
-       { NULL, },
-};
-
-static struct sched_domain_topology_level *sched_domain_topology =
-       default_topology;
-
-#define for_each_sd_topology(tl)                       \
-       for (tl = sched_domain_topology; tl->mask; tl++)
-
-void set_sched_topology(struct sched_domain_topology_level *tl)
-{
-       if (WARN_ON_ONCE(sched_smp_initialized))
-               return;
-
-       sched_domain_topology = tl;
-}
-
-#ifdef CONFIG_NUMA
-
-static const struct cpumask *sd_numa_mask(int cpu)
-{
-       return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
-}
-
-static void sched_numa_warn(const char *str)
-{
-       static int done = false;
-       int i,j;
-
-       if (done)
-               return;
-
-       done = true;
-
-       printk(KERN_WARNING "ERROR: %s\n\n", str);
-
-       for (i = 0; i < nr_node_ids; i++) {
-               printk(KERN_WARNING "  ");
-               for (j = 0; j < nr_node_ids; j++)
-                       printk(KERN_CONT "%02d ", node_distance(i,j));
-               printk(KERN_CONT "\n");
-       }
-       printk(KERN_WARNING "\n");
-}
-
-bool find_numa_distance(int distance)
-{
-       int i;
-
-       if (distance == node_distance(0, 0))
-               return true;
-
-       for (i = 0; i < sched_domains_numa_levels; i++) {
-               if (sched_domains_numa_distance[i] == distance)
-                       return true;
-       }
-
-       return false;
-}
-
-/*
- * A system can have three types of NUMA topology:
- * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
- * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
- * NUMA_BACKPLANE: nodes can reach other nodes through a backplane
- *
- * The difference between a glueless mesh topology and a backplane
- * topology lies in whether communication between not directly
- * connected nodes goes through intermediary nodes (where programs
- * could run), or through backplane controllers. This affects
- * placement of programs.
- *
- * The type of topology can be discerned with the following tests:
- * - If the maximum distance between any nodes is 1 hop, the system
- *   is directly connected.
- * - If for two nodes A and B, located N > 1 hops away from each other,
- *   there is an intermediary node C, which is < N hops away from both
- *   nodes A and B, the system is a glueless mesh.
- */
-static void init_numa_topology_type(void)
-{
-       int a, b, c, n;
-
-       n = sched_max_numa_distance;
-
-       if (sched_domains_numa_levels <= 1) {
-               sched_numa_topology_type = NUMA_DIRECT;
-               return;
-       }
-
-       for_each_online_node(a) {
-               for_each_online_node(b) {
-                       /* Find two nodes furthest removed from each other. */
-                       if (node_distance(a, b) < n)
-                               continue;
-
-                       /* Is there an intermediary node between a and b? */
-                       for_each_online_node(c) {
-                               if (node_distance(a, c) < n &&
-                                   node_distance(b, c) < n) {
-                                       sched_numa_topology_type =
-                                                       NUMA_GLUELESS_MESH;
-                                       return;
-                               }
-                       }
-
-                       sched_numa_topology_type = NUMA_BACKPLANE;
-                       return;
-               }
-       }
-}
-
-static void sched_init_numa(void)
-{
-       int next_distance, curr_distance = node_distance(0, 0);
-       struct sched_domain_topology_level *tl;
-       int level = 0;
-       int i, j, k;
-
-       sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
-       if (!sched_domains_numa_distance)
-               return;
-
-       /*
-        * O(nr_nodes^2) deduplicating selection sort -- in order to find the
-        * unique distances in the node_distance() table.
-        *
-        * Assumes node_distance(0,j) includes all distances in
-        * node_distance(i,j) in order to avoid cubic time.
-        */
-       next_distance = curr_distance;
-       for (i = 0; i < nr_node_ids; i++) {
-               for (j = 0; j < nr_node_ids; j++) {
-                       for (k = 0; k < nr_node_ids; k++) {
-                               int distance = node_distance(i, k);
-
-                               if (distance > curr_distance &&
-                                   (distance < next_distance ||
-                                    next_distance == curr_distance))
-                                       next_distance = distance;
-
-                               /*
-                                * While not a strong assumption it would be nice to know
-                                * about cases where if node A is connected to B, B is not
-                                * equally connected to A.
-                                */
-                               if (sched_debug() && node_distance(k, i) != distance)
-                                       sched_numa_warn("Node-distance not symmetric");
-
-                               if (sched_debug() && i && !find_numa_distance(distance))
-                                       sched_numa_warn("Node-0 not representative");
-                       }
-                       if (next_distance != curr_distance) {
-                               sched_domains_numa_distance[level++] = next_distance;
-                               sched_domains_numa_levels = level;
-                               curr_distance = next_distance;
-                       } else break;
-               }
-
-               /*
-                * In case of sched_debug() we verify the above assumption.
-                */
-               if (!sched_debug())
-                       break;
-       }
-
-       if (!level)
-               return;
-
-       /*
-        * 'level' contains the number of unique distances, excluding the
-        * identity distance node_distance(i,i).
-        *
-        * The sched_domains_numa_distance[] array includes the actual distance
-        * numbers.
-        */
-
-       /*
-        * Here, we should temporarily reset sched_domains_numa_levels to 0.
-        * If it fails to allocate memory for array sched_domains_numa_masks[][],
-        * the array will contain less then 'level' members. This could be
-        * dangerous when we use it to iterate array sched_domains_numa_masks[][]
-        * in other functions.
-        *
-        * We reset it to 'level' at the end of this function.
-        */
-       sched_domains_numa_levels = 0;
-
-       sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
-       if (!sched_domains_numa_masks)
-               return;
-
-       /*
-        * Now for each level, construct a mask per node which contains all
-        * cpus of nodes that are that many hops away from us.
-        */
-       for (i = 0; i < level; i++) {
-               sched_domains_numa_masks[i] =
-                       kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
-               if (!sched_domains_numa_masks[i])
-                       return;
-
-               for (j = 0; j < nr_node_ids; j++) {
-                       struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
-                       if (!mask)
-                               return;
-
-                       sched_domains_numa_masks[i][j] = mask;
-
-                       for_each_node(k) {
-                               if (node_distance(j, k) > sched_domains_numa_distance[i])
-                                       continue;
-
-                               cpumask_or(mask, mask, cpumask_of_node(k));
-                       }
-               }
-       }
-
-       /* Compute default topology size */
-       for (i = 0; sched_domain_topology[i].mask; i++);
-
-       tl = kzalloc((i + level + 1) *
-                       sizeof(struct sched_domain_topology_level), GFP_KERNEL);
-       if (!tl)
-               return;
-
-       /*
-        * Copy the default topology bits..
-        */
-       for (i = 0; sched_domain_topology[i].mask; i++)
-               tl[i] = sched_domain_topology[i];
-
-       /*
-        * .. and append 'j' levels of NUMA goodness.
-        */
-       for (j = 0; j < level; i++, j++) {
-               tl[i] = (struct sched_domain_topology_level){
-                       .mask = sd_numa_mask,
-                       .sd_flags = cpu_numa_flags,
-                       .flags = SDTL_OVERLAP,
-                       .numa_level = j,
-                       SD_INIT_NAME(NUMA)
-               };
-       }
-
-       sched_domain_topology = tl;
-
-       sched_domains_numa_levels = level;
-       sched_max_numa_distance = sched_domains_numa_distance[level - 1];
-
-       init_numa_topology_type();
-}
-
-static void sched_domains_numa_masks_set(unsigned int cpu)
-{
-       int node = cpu_to_node(cpu);
-       int i, j;
-
-       for (i = 0; i < sched_domains_numa_levels; i++) {
-               for (j = 0; j < nr_node_ids; j++) {
-                       if (node_distance(j, node) <= sched_domains_numa_distance[i])
-                               cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
-               }
-       }
-}
-
-static void sched_domains_numa_masks_clear(unsigned int cpu)
-{
-       int i, j;
-
-       for (i = 0; i < sched_domains_numa_levels; i++) {
-               for (j = 0; j < nr_node_ids; j++)
-                       cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
-       }
-}
-
-#else
-static inline void sched_init_numa(void) { }
-static void sched_domains_numa_masks_set(unsigned int cpu) { }
-static void sched_domains_numa_masks_clear(unsigned int cpu) { }
-#endif /* CONFIG_NUMA */
-
-static int __sdt_alloc(const struct cpumask *cpu_map)
-{
-       struct sched_domain_topology_level *tl;
-       int j;
-
-       for_each_sd_topology(tl) {
-               struct sd_data *sdd = &tl->data;
-
-               sdd->sd = alloc_percpu(struct sched_domain *);
-               if (!sdd->sd)
-                       return -ENOMEM;
-
-               sdd->sds = alloc_percpu(struct sched_domain_shared *);
-               if (!sdd->sds)
-                       return -ENOMEM;
-
-               sdd->sg = alloc_percpu(struct sched_group *);
-               if (!sdd->sg)
-                       return -ENOMEM;
-
-               sdd->sgc = alloc_percpu(struct sched_group_capacity *);
-               if (!sdd->sgc)
-                       return -ENOMEM;
-
-               for_each_cpu(j, cpu_map) {
-                       struct sched_domain *sd;
-                       struct sched_domain_shared *sds;
-                       struct sched_group *sg;
-                       struct sched_group_capacity *sgc;
-
-                       sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
-                                       GFP_KERNEL, cpu_to_node(j));
-                       if (!sd)
-                               return -ENOMEM;
-
-                       *per_cpu_ptr(sdd->sd, j) = sd;
-
-                       sds = kzalloc_node(sizeof(struct sched_domain_shared),
-                                       GFP_KERNEL, cpu_to_node(j));
-                       if (!sds)
-                               return -ENOMEM;
-
-                       *per_cpu_ptr(sdd->sds, j) = sds;
-
-                       sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
-                                       GFP_KERNEL, cpu_to_node(j));
-                       if (!sg)
-                               return -ENOMEM;
-
-                       sg->next = sg;
-
-                       *per_cpu_ptr(sdd->sg, j) = sg;
-
-                       sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
-                                       GFP_KERNEL, cpu_to_node(j));
-                       if (!sgc)
-                               return -ENOMEM;
-
-                       *per_cpu_ptr(sdd->sgc, j) = sgc;
-               }
-       }
-
-       return 0;
-}
-
-static void __sdt_free(const struct cpumask *cpu_map)
-{
-       struct sched_domain_topology_level *tl;
-       int j;
-
-       for_each_sd_topology(tl) {
-               struct sd_data *sdd = &tl->data;
-
-               for_each_cpu(j, cpu_map) {
-                       struct sched_domain *sd;
-
-                       if (sdd->sd) {
-                               sd = *per_cpu_ptr(sdd->sd, j);
-                               if (sd && (sd->flags & SD_OVERLAP))
-                                       free_sched_groups(sd->groups, 0);
-                               kfree(*per_cpu_ptr(sdd->sd, j));
-                       }
-
-                       if (sdd->sds)
-                               kfree(*per_cpu_ptr(sdd->sds, j));
-                       if (sdd->sg)
-                               kfree(*per_cpu_ptr(sdd->sg, j));
-                       if (sdd->sgc)
-                               kfree(*per_cpu_ptr(sdd->sgc, j));
-               }
-               free_percpu(sdd->sd);
-               sdd->sd = NULL;
-               free_percpu(sdd->sds);
-               sdd->sds = NULL;
-               free_percpu(sdd->sg);
-               sdd->sg = NULL;
-               free_percpu(sdd->sgc);
-               sdd->sgc = NULL;
-       }
-}
-
-struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
-               const struct cpumask *cpu_map, struct sched_domain_attr *attr,
-               struct sched_domain *child, int cpu)
-{
-       struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu);
-
-       if (child) {
-               sd->level = child->level + 1;
-               sched_domain_level_max = max(sched_domain_level_max, sd->level);
-               child->parent = sd;
-
-               if (!cpumask_subset(sched_domain_span(child),
-                                   sched_domain_span(sd))) {
-                       pr_err("BUG: arch topology borken\n");
-#ifdef CONFIG_SCHED_DEBUG
-                       pr_err("     the %s domain not a subset of the %s domain\n",
-                                       child->name, sd->name);
-#endif
-                       /* Fixup, ensure @sd has at least @child cpus. */
-                       cpumask_or(sched_domain_span(sd),
-                                  sched_domain_span(sd),
-                                  sched_domain_span(child));
-               }
-
-       }
-       set_domain_attribute(sd, attr);
-
-       return sd;
-}
-
-/*
- * Build sched domains for a given set of cpus and attach the sched domains
- * to the individual cpus
- */
-static int build_sched_domains(const struct cpumask *cpu_map,
-                              struct sched_domain_attr *attr)
-{
-       enum s_alloc alloc_state;
-       struct sched_domain *sd;
-       struct s_data d;
-       struct rq *rq = NULL;
-       int i, ret = -ENOMEM;
-
-       alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
-       if (alloc_state != sa_rootdomain)
-               goto error;
-
-       /* Set up domains for cpus specified by the cpu_map. */
-       for_each_cpu(i, cpu_map) {
-               struct sched_domain_topology_level *tl;
-
-               sd = NULL;
-               for_each_sd_topology(tl) {
-                       sd = build_sched_domain(tl, cpu_map, attr, sd, i);
-                       if (tl == sched_domain_topology)
-                               *per_cpu_ptr(d.sd, i) = sd;
-                       if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
-                               sd->flags |= SD_OVERLAP;
-                       if (cpumask_equal(cpu_map, sched_domain_span(sd)))
-                               break;
-               }
-       }
-
-       /* Build the groups for the domains */
-       for_each_cpu(i, cpu_map) {
-               for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
-                       sd->span_weight = cpumask_weight(sched_domain_span(sd));
-                       if (sd->flags & SD_OVERLAP) {
-                               if (build_overlap_sched_groups(sd, i))
-                                       goto error;
-                       } else {
-                               if (build_sched_groups(sd, i))
-                                       goto error;
-                       }
-               }
-       }
-
-       /* Calculate CPU capacity for physical packages and nodes */
-       for (i = nr_cpumask_bits-1; i >= 0; i--) {
-               if (!cpumask_test_cpu(i, cpu_map))
-                       continue;
-
-               for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
-                       claim_allocations(i, sd);
-                       init_sched_groups_capacity(i, sd);
-               }
-       }
-
-       /* Attach the domains */
-       rcu_read_lock();
-       for_each_cpu(i, cpu_map) {
-               rq = cpu_rq(i);
-               sd = *per_cpu_ptr(d.sd, i);
-
-               /* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */
-               if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity))
-                       WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig);
-
-               cpu_attach_domain(sd, d.rd, i);
-       }
-       rcu_read_unlock();
-
-       if (rq && sched_debug_enabled) {
-               pr_info("span: %*pbl (max cpu_capacity = %lu)\n",
-                       cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
-       }
-
-       ret = 0;
-error:
-       __free_domain_allocs(&d, alloc_state, cpu_map);
-       return ret;
-}
-
-static cpumask_var_t *doms_cur;        /* current sched domains */
-static int ndoms_cur;          /* number of sched domains in 'doms_cur' */
-static struct sched_domain_attr *dattr_cur;
-                               /* attribues of custom domains in 'doms_cur' */
-
-/*
- * Special case: If a kmalloc of a doms_cur partition (array of
- * cpumask) fails, then fallback to a single sched domain,
- * as determined by the single cpumask fallback_doms.
- */
-static cpumask_var_t fallback_doms;
-
-/*
- * arch_update_cpu_topology lets virtualized architectures update the
- * cpu core maps. It is supposed to return 1 if the topology changed
- * or 0 if it stayed the same.
- */
-int __weak arch_update_cpu_topology(void)
-{
-       return 0;
-}
-
-cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
-{
-       int i;
-       cpumask_var_t *doms;
-
-       doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
-       if (!doms)
-               return NULL;
-       for (i = 0; i < ndoms; i++) {
-               if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
-                       free_sched_domains(doms, i);
-                       return NULL;
-               }
-       }
-       return doms;
-}
-
-void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
-{
-       unsigned int i;
-       for (i = 0; i < ndoms; i++)
-               free_cpumask_var(doms[i]);
-       kfree(doms);
-}
-
-/*
- * Set up scheduler domains and groups. Callers must hold the hotplug lock.
- * For now this just excludes isolated cpus, but could be used to
- * exclude other special cases in the future.
- */
-static int init_sched_domains(const struct cpumask *cpu_map)
-{
-       int err;
-
-       arch_update_cpu_topology();
-       ndoms_cur = 1;
-       doms_cur = alloc_sched_domains(ndoms_cur);
-       if (!doms_cur)
-               doms_cur = &fallback_doms;
-       cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
-       err = build_sched_domains(doms_cur[0], NULL);
-       register_sched_domain_sysctl();
-
-       return err;
-}
-
-/*
- * Detach sched domains from a group of cpus specified in cpu_map
- * These cpus will now be attached to the NULL domain
- */
-static void detach_destroy_domains(const struct cpumask *cpu_map)
-{
-       int i;
-
-       rcu_read_lock();
-       for_each_cpu(i, cpu_map)
-               cpu_attach_domain(NULL, &def_root_domain, i);
-       rcu_read_unlock();
-}
-
-/* handle null as "default" */
-static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
-                       struct sched_domain_attr *new, int idx_new)
-{
-       struct sched_domain_attr tmp;
-
-       /* fast path */
-       if (!new && !cur)
-               return 1;
-
-       tmp = SD_ATTR_INIT;
-       return !memcmp(cur ? (cur + idx_cur) : &tmp,
-                       new ? (new + idx_new) : &tmp,
-                       sizeof(struct sched_domain_attr));
-}
-
-/*
- * Partition sched domains as specified by the 'ndoms_new'
- * cpumasks in the array doms_new[] of cpumasks. This compares
- * doms_new[] to the current sched domain partitioning, doms_cur[].
- * It destroys each deleted domain and builds each new domain.
- *
- * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
- * The masks don't intersect (don't overlap.) We should setup one
- * sched domain for each mask. CPUs not in any of the cpumasks will
- * not be load balanced. If the same cpumask appears both in the
- * current 'doms_cur' domains and in the new 'doms_new', we can leave
- * it as it is.
- *
- * The passed in 'doms_new' should be allocated using
- * alloc_sched_domains.  This routine takes ownership of it and will
- * free_sched_domains it when done with it. If the caller failed the
- * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
- * and partition_sched_domains() will fallback to the single partition
- * 'fallback_doms', it also forces the domains to be rebuilt.
- *
- * If doms_new == NULL it will be replaced with cpu_online_mask.
- * ndoms_new == 0 is a special case for destroying existing domains,
- * and it will not create the default domain.
- *
- * Call with hotplug lock held
- */
-void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
-                            struct sched_domain_attr *dattr_new)
-{
-       int i, j, n;
-       int new_topology;
-
-       mutex_lock(&sched_domains_mutex);
-
-       /* always unregister in case we don't destroy any domains */
-       unregister_sched_domain_sysctl();
-
-       /* Let architecture update cpu core mappings. */
-       new_topology = arch_update_cpu_topology();
-
-       n = doms_new ? ndoms_new : 0;
-
-       /* Destroy deleted domains */
-       for (i = 0; i < ndoms_cur; i++) {
-               for (j = 0; j < n && !new_topology; j++) {
-                       if (cpumask_equal(doms_cur[i], doms_new[j])
-                           && dattrs_equal(dattr_cur, i, dattr_new, j))
-                               goto match1;
-               }
-               /* no match - a current sched domain not in new doms_new[] */
-               detach_destroy_domains(doms_cur[i]);
-match1:
-               ;
-       }
-
-       n = ndoms_cur;
-       if (doms_new == NULL) {
-               n = 0;
-               doms_new = &fallback_doms;
-               cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
-               WARN_ON_ONCE(dattr_new);
-       }
-
-       /* Build new domains */
-       for (i = 0; i < ndoms_new; i++) {
-               for (j = 0; j < n && !new_topology; j++) {
-                       if (cpumask_equal(doms_new[i], doms_cur[j])
-                           && dattrs_equal(dattr_new, i, dattr_cur, j))
-                               goto match2;
-               }
-               /* no match - add a new doms_new */
-               build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
-match2:
-               ;
-       }
-
-       /* Remember the new sched domains */
-       if (doms_cur != &fallback_doms)
-               free_sched_domains(doms_cur, ndoms_cur);
-       kfree(dattr_cur);       /* kfree(NULL) is safe */
-       doms_cur = doms_new;
-       dattr_cur = dattr_new;
-       ndoms_cur = ndoms_new;
-
-       register_sched_domain_sysctl();
-
-       mutex_unlock(&sched_domains_mutex);
-}
-
-static int num_cpus_frozen;    /* used to mark begin/end of suspend/resume */
-
-/*
- * Update cpusets according to cpu_active mask.  If cpusets are
- * disabled, cpuset_update_active_cpus() becomes a simple wrapper
- * around partition_sched_domains().
- *
- * If we come here as part of a suspend/resume, don't touch cpusets because we
- * want to restore it back to its original state upon resume anyway.
- */
-static void cpuset_cpu_active(void)
+/*
+ * Update cpusets according to cpu_active mask.  If cpusets are
+ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
+ * around partition_sched_domains().
+ *
+ * If we come here as part of a suspend/resume, don't touch cpusets because we
+ * want to restore it back to its original state upon resume anyway.
+ */
+static void cpuset_cpu_active(void)
 {
        if (cpuhp_tasks_frozen) {
                /*
@@ -7352,7 +5759,7 @@ int sched_cpu_activate(unsigned int cpu)
         * Put the rq online, if not already. This happens:
         *
         * 1) In the early boot process, because we build the real domains
-        *    after all cpus have been brought up.
+        *    after all CPUs have been brought up.
         *
         * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
         *    domains.
@@ -7467,7 +5874,7 @@ void __init sched_init_smp(void)
 
        /*
         * There's no userspace yet to cause hotplug operations; hence all the
-        * cpu masks are stable and all blatant races in the below code cannot
+        * CPU masks are stable and all blatant races in the below code cannot
         * happen.
         */
        mutex_lock(&sched_domains_mutex);
@@ -7487,6 +5894,7 @@ void __init sched_init_smp(void)
        init_sched_dl_class();
 
        sched_init_smt();
+       sched_clock_init_late();
 
        sched_smp_initialized = true;
 }
@@ -7502,6 +5910,7 @@ early_initcall(migration_init);
 void __init sched_init_smp(void)
 {
        sched_init_granularity();
+       sched_clock_init_late();
 }
 #endif /* CONFIG_SMP */
 
@@ -7545,6 +5954,8 @@ void __init sched_init(void)
        int i, j;
        unsigned long alloc_size = 0, ptr;
 
+       sched_clock_init();
+
        for (i = 0; i < WAIT_TABLE_SIZE; i++)
                init_waitqueue_head(bit_wait_table + i);
 
@@ -7583,10 +5994,8 @@ void __init sched_init(void)
        }
 #endif /* CONFIG_CPUMASK_OFFSTACK */
 
-       init_rt_bandwidth(&def_rt_bandwidth,
-                       global_rt_period(), global_rt_runtime());
-       init_dl_bandwidth(&def_dl_bandwidth,
-                       global_rt_period(), global_rt_runtime());
+       init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime());
+       init_dl_bandwidth(&def_dl_bandwidth, global_rt_period(), global_rt_runtime());
 
 #ifdef CONFIG_SMP
        init_defrootdomain();
@@ -7622,18 +6031,18 @@ void __init sched_init(void)
                INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
                rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
                /*
-                * How much cpu bandwidth does root_task_group get?
+                * How much CPU bandwidth does root_task_group get?
                 *
                 * In case of task-groups formed thr' the cgroup filesystem, it
-                * gets 100% of the cpu resources in the system. This overall
-                * system cpu resource is divided among the tasks of
+                * gets 100% of the CPU resources in the system. This overall
+                * system CPU resource is divided among the tasks of
                 * root_task_group and its child task-groups in a fair manner,
                 * based on each entity's (task or task-group's) weight
                 * (se->load.weight).
                 *
                 * In other words, if root_task_group has 10 tasks of weight
                 * 1024) and two child groups A0 and A1 (of weight 1024 each),
-                * then A0's share of the cpu resource is:
+                * then A0's share of the CPU resource is:
                 *
                 *      A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
                 *
@@ -7742,10 +6151,14 @@ EXPORT_SYMBOL(__might_sleep);
 
 void ___might_sleep(const char *file, int line, int preempt_offset)
 {
-       static unsigned long prev_jiffy;        /* ratelimiting */
+       /* Ratelimiting timestamp: */
+       static unsigned long prev_jiffy;
+
        unsigned long preempt_disable_ip;
 
-       rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
+       /* WARN_ON_ONCE() by default, no rate limit required: */
+       rcu_sleep_check();
+
        if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
             !is_idle_task(current)) ||
            system_state != SYSTEM_RUNNING || oops_in_progress)
@@ -7754,7 +6167,7 @@ void ___might_sleep(const char *file, int line, int preempt_offset)
                return;
        prev_jiffy = jiffies;
 
-       /* Save this before calling printk(), since that will clobber it */
+       /* Save this before calling printk(), since that will clobber it: */
        preempt_disable_ip = get_preempt_disable_ip(current);
 
        printk(KERN_ERR
@@ -7833,7 +6246,7 @@ void normalize_rt_tasks(void)
  */
 
 /**
- * curr_task - return the current task for a given cpu.
+ * curr_task - return the current task for a given CPU.
  * @cpu: the processor in question.
  *
  * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
@@ -7849,13 +6262,13 @@ struct task_struct *curr_task(int cpu)
 
 #ifdef CONFIG_IA64
 /**
- * set_curr_task - set the current task for a given cpu.
+ * set_curr_task - set the current task for a given CPU.
  * @cpu: the processor in question.
  * @p: the task pointer to set.
  *
  * Description: This function must only be used when non-maskable interrupts
  * are serviced on a separate stack. It allows the architecture to switch the
- * notion of the current task on a cpu in a non-blocking manner. This function
+ * notion of the current task on a CPU in a non-blocking manner. This function
  * must be called with all CPU's synchronized, and interrupts disabled, the
  * caller must save the original value of the current task (see
  * curr_task() above) and restore that value before reenabling interrupts and
@@ -7911,7 +6324,8 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
        spin_lock_irqsave(&task_group_lock, flags);
        list_add_rcu(&tg->list, &task_groups);
 
-       WARN_ON(!parent); /* root should already exist */
+       /* Root should already exist: */
+       WARN_ON(!parent);
 
        tg->parent = parent;
        INIT_LIST_HEAD(&tg->children);
@@ -7924,13 +6338,13 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
 /* rcu callback to free various structures associated with a task group */
 static void sched_free_group_rcu(struct rcu_head *rhp)
 {
-       /* now it should be safe to free those cfs_rqs */
+       /* Now it should be safe to free those cfs_rqs: */
        sched_free_group(container_of(rhp, struct task_group, rcu));
 }
 
 void sched_destroy_group(struct task_group *tg)
 {
-       /* wait for possible concurrent references to cfs_rqs complete */
+       /* Wait for possible concurrent references to cfs_rqs to complete: */
        call_rcu(&tg->rcu, sched_free_group_rcu);
 }
 
@@ -7938,7 +6352,7 @@ void sched_offline_group(struct task_group *tg)
 {
        unsigned long flags;
 
-       /* end participation in shares distribution */
+       /* End participation in shares distribution: */
        unregister_fair_sched_group(tg);
 
        spin_lock_irqsave(&task_group_lock, flags);
@@ -7983,20 +6397,21 @@ void sched_move_task(struct task_struct *tsk)
        struct rq *rq;
 
        rq = task_rq_lock(tsk, &rf);
+       update_rq_clock(rq);
 
        running = task_current(rq, tsk);
        queued = task_on_rq_queued(tsk);
 
        if (queued)
                dequeue_task(rq, tsk, DEQUEUE_SAVE | DEQUEUE_MOVE);
-       if (unlikely(running))
+       if (running)
                put_prev_task(rq, tsk);
 
        sched_change_group(tsk, TASK_MOVE_GROUP);
 
        if (queued)
                enqueue_task(rq, tsk, ENQUEUE_RESTORE | ENQUEUE_MOVE);
-       if (unlikely(running))
+       if (running)
                set_curr_task(rq, tsk);
 
        task_rq_unlock(rq, tsk, &rf);
@@ -8366,11 +6781,14 @@ int sched_rr_handler(struct ctl_table *table, int write,
 
        mutex_lock(&mutex);
        ret = proc_dointvec(table, write, buffer, lenp, ppos);
-       /* make sure that internally we keep jiffies */
-       /* also, writing zero resets timeslice to default */
+       /*
+        * Make sure that internally we keep jiffies.
+        * Also, writing zero resets the timeslice to default:
+        */
        if (!ret && write) {
-               sched_rr_timeslice = sched_rr_timeslice <= 0 ?
-                       RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
+               sched_rr_timeslice =
+                       sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
+                       msecs_to_jiffies(sysctl_sched_rr_timeslice);
        }
        mutex_unlock(&mutex);
        return ret;
@@ -8431,6 +6849,7 @@ static void cpu_cgroup_fork(struct task_struct *task)
 
        rq = task_rq_lock(task, &rf);
 
+       update_rq_clock(rq);
        sched_change_group(task, TASK_SET_GROUP);
 
        task_rq_unlock(rq, task, &rf);
@@ -8550,9 +6969,11 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
        cfs_b->quota = quota;
 
        __refill_cfs_bandwidth_runtime(cfs_b);
-       /* restart the period timer (if active) to handle new period expiry */
+
+       /* Restart the period timer (if active) to handle new period expiry: */
        if (runtime_enabled)
                start_cfs_bandwidth(cfs_b);
+
        raw_spin_unlock_irq(&cfs_b->lock);
 
        for_each_online_cpu(i) {
@@ -8690,8 +7111,8 @@ static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
                parent_quota = parent_b->hierarchical_quota;
 
                /*
-                * ensure max(child_quota) <= parent_quota, inherit when no
-                * limit is set
+                * Ensure max(child_quota) <= parent_quota, inherit when no
+                * limit is set:
                 */
                if (quota == RUNTIME_INF)
                        quota = parent_quota;
@@ -8800,7 +7221,7 @@ static struct cftype cpu_files[] = {
                .write_u64 = cpu_rt_period_write_uint,
        },
 #endif
-       { }     /* terminate */
+       { }     /* Terminate */
 };
 
 struct cgroup_subsys cpu_cgrp_subsys = {
index 9add206b56082ee8ee35a878c9d9f87411f49359..f95ab29a45d0515d5651719143b38e335d366f4e 100644 (file)
@@ -297,7 +297,7 @@ static int cpuacct_stats_show(struct seq_file *sf, void *v)
        for (stat = 0; stat < CPUACCT_STAT_NSTATS; stat++) {
                seq_printf(sf, "%s %lld\n",
                           cpuacct_stat_desc[stat],
-                          (long long)cputime64_to_clock_t(val[stat]));
+                          (long long)nsec_to_clock_t(val[stat]));
        }
 
        return 0;
index 7700a9cba335059d7234649bbbf35dd113307698..2ecec3a4f1eeccf44bf9de57b0f8ad242c438c88 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/static_key.h>
 #include <linux/context_tracking.h>
+#include <linux/cputime.h>
 #include "sched.h"
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
@@ -44,6 +45,7 @@ void disable_sched_clock_irqtime(void)
 void irqtime_account_irq(struct task_struct *curr)
 {
        struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
+       u64 *cpustat = kcpustat_this_cpu->cpustat;
        s64 delta;
        int cpu;
 
@@ -61,49 +63,34 @@ void irqtime_account_irq(struct task_struct *curr)
         * in that case, so as not to confuse scheduler with a special task
         * that does not consume any time, but still wants to run.
         */
-       if (hardirq_count())
-               irqtime->hardirq_time += delta;
-       else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
-               irqtime->softirq_time += delta;
+       if (hardirq_count()) {
+               cpustat[CPUTIME_IRQ] += delta;
+               irqtime->tick_delta += delta;
+       } else if (in_serving_softirq() && curr != this_cpu_ksoftirqd()) {
+               cpustat[CPUTIME_SOFTIRQ] += delta;
+               irqtime->tick_delta += delta;
+       }
 
        u64_stats_update_end(&irqtime->sync);
 }
 EXPORT_SYMBOL_GPL(irqtime_account_irq);
 
-static cputime_t irqtime_account_update(u64 irqtime, int idx, cputime_t maxtime)
+static u64 irqtime_tick_accounted(u64 maxtime)
 {
-       u64 *cpustat = kcpustat_this_cpu->cpustat;
-       cputime_t irq_cputime;
-
-       irq_cputime = nsecs_to_cputime64(irqtime) - cpustat[idx];
-       irq_cputime = min(irq_cputime, maxtime);
-       cpustat[idx] += irq_cputime;
+       struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
+       u64 delta;
 
-       return irq_cputime;
-}
+       delta = min(irqtime->tick_delta, maxtime);
+       irqtime->tick_delta -= delta;
 
-static cputime_t irqtime_account_hi_update(cputime_t maxtime)
-{
-       return irqtime_account_update(__this_cpu_read(cpu_irqtime.hardirq_time),
-                                     CPUTIME_IRQ, maxtime);
-}
-
-static cputime_t irqtime_account_si_update(cputime_t maxtime)
-{
-       return irqtime_account_update(__this_cpu_read(cpu_irqtime.softirq_time),
-                                     CPUTIME_SOFTIRQ, maxtime);
+       return delta;
 }
 
 #else /* CONFIG_IRQ_TIME_ACCOUNTING */
 
 #define sched_clock_irqtime    (0)
 
-static cputime_t irqtime_account_hi_update(cputime_t dummy)
-{
-       return 0;
-}
-
-static cputime_t irqtime_account_si_update(cputime_t dummy)
+static u64 irqtime_tick_accounted(u64 dummy)
 {
        return 0;
 }
@@ -129,7 +116,7 @@ static inline void task_group_account_field(struct task_struct *p, int index,
  * @p: the process that the cpu time gets accounted to
  * @cputime: the cpu time spent in user space since the last update
  */
-void account_user_time(struct task_struct *p, cputime_t cputime)
+void account_user_time(struct task_struct *p, u64 cputime)
 {
        int index;
 
@@ -140,7 +127,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
        index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
 
        /* Add user time to cpustat. */
-       task_group_account_field(p, index, (__force u64) cputime);
+       task_group_account_field(p, index, cputime);
 
        /* Account for user time used */
        acct_account_cputime(p);
@@ -151,7 +138,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
  * @p: the process that the cpu time gets accounted to
  * @cputime: the cpu time spent in virtual machine since the last update
  */
-static void account_guest_time(struct task_struct *p, cputime_t cputime)
+void account_guest_time(struct task_struct *p, u64 cputime)
 {
        u64 *cpustat = kcpustat_this_cpu->cpustat;
 
@@ -162,11 +149,11 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime)
 
        /* Add guest time to cpustat. */
        if (task_nice(p) > 0) {
-               cpustat[CPUTIME_NICE] += (__force u64) cputime;
-               cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
+               cpustat[CPUTIME_NICE] += cputime;
+               cpustat[CPUTIME_GUEST_NICE] += cputime;
        } else {
-               cpustat[CPUTIME_USER] += (__force u64) cputime;
-               cpustat[CPUTIME_GUEST] += (__force u64) cputime;
+               cpustat[CPUTIME_USER] += cputime;
+               cpustat[CPUTIME_GUEST] += cputime;
        }
 }
 
@@ -176,15 +163,15 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime)
  * @cputime: the cpu time spent in kernel space since the last update
  * @index: pointer to cpustat field that has to be updated
  */
-static inline
-void __account_system_time(struct task_struct *p, cputime_t cputime, int index)
+void account_system_index_time(struct task_struct *p,
+                              u64 cputime, enum cpu_usage_stat index)
 {
        /* Add system time to process. */
        p->stime += cputime;
        account_group_system_time(p, cputime);
 
        /* Add system time to cpustat. */
-       task_group_account_field(p, index, (__force u64) cputime);
+       task_group_account_field(p, index, cputime);
 
        /* Account for system time used */
        acct_account_cputime(p);
@@ -196,8 +183,7 @@ void __account_system_time(struct task_struct *p, cputime_t cputime, int index)
  * @hardirq_offset: the offset to subtract from hardirq_count()
  * @cputime: the cpu time spent in kernel space since the last update
  */
-void account_system_time(struct task_struct *p, int hardirq_offset,
-                        cputime_t cputime)
+void account_system_time(struct task_struct *p, int hardirq_offset, u64 cputime)
 {
        int index;
 
@@ -213,33 +199,33 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
        else
                index = CPUTIME_SYSTEM;
 
-       __account_system_time(p, cputime, index);
+       account_system_index_time(p, cputime, index);
 }
 
 /*
  * Account for involuntary wait time.
  * @cputime: the cpu time spent in involuntary wait
  */
-void account_steal_time(cputime_t cputime)
+void account_steal_time(u64 cputime)
 {
        u64 *cpustat = kcpustat_this_cpu->cpustat;
 
-       cpustat[CPUTIME_STEAL] += (__force u64) cputime;
+       cpustat[CPUTIME_STEAL] += cputime;
 }
 
 /*
  * Account for idle time.
  * @cputime: the cpu time spent in idle wait
  */
-void account_idle_time(cputime_t cputime)
+void account_idle_time(u64 cputime)
 {
        u64 *cpustat = kcpustat_this_cpu->cpustat;
        struct rq *rq = this_rq();
 
        if (atomic_read(&rq->nr_iowait) > 0)
-               cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
+               cpustat[CPUTIME_IOWAIT] += cputime;
        else
-               cpustat[CPUTIME_IDLE] += (__force u64) cputime;
+               cpustat[CPUTIME_IDLE] += cputime;
 }
 
 /*
@@ -247,21 +233,19 @@ void account_idle_time(cputime_t cputime)
  * ticks are not redelivered later. Due to that, this function may on
  * occasion account more time than the calling functions think elapsed.
  */
-static __always_inline cputime_t steal_account_process_time(cputime_t maxtime)
+static __always_inline u64 steal_account_process_time(u64 maxtime)
 {
 #ifdef CONFIG_PARAVIRT
        if (static_key_false(&paravirt_steal_enabled)) {
-               cputime_t steal_cputime;
                u64 steal;
 
                steal = paravirt_steal_clock(smp_processor_id());
                steal -= this_rq()->prev_steal_time;
+               steal = min(steal, maxtime);
+               account_steal_time(steal);
+               this_rq()->prev_steal_time += steal;
 
-               steal_cputime = min(nsecs_to_cputime(steal), maxtime);
-               account_steal_time(steal_cputime);
-               this_rq()->prev_steal_time += cputime_to_nsecs(steal_cputime);
-
-               return steal_cputime;
+               return steal;
        }
 #endif
        return 0;
@@ -270,9 +254,9 @@ static __always_inline cputime_t steal_account_process_time(cputime_t maxtime)
 /*
  * Account how much elapsed time was spent in steal, irq, or softirq time.
  */
-static inline cputime_t account_other_time(cputime_t max)
+static inline u64 account_other_time(u64 max)
 {
-       cputime_t accounted;
+       u64 accounted;
 
        /* Shall be converted to a lockdep-enabled lightweight check */
        WARN_ON_ONCE(!irqs_disabled());
@@ -280,10 +264,7 @@ static inline cputime_t account_other_time(cputime_t max)
        accounted = steal_account_process_time(max);
 
        if (accounted < max)
-               accounted += irqtime_account_hi_update(max - accounted);
-
-       if (accounted < max)
-               accounted += irqtime_account_si_update(max - accounted);
+               accounted += irqtime_tick_accounted(max - accounted);
 
        return accounted;
 }
@@ -315,7 +296,7 @@ static u64 read_sum_exec_runtime(struct task_struct *t)
 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 {
        struct signal_struct *sig = tsk->signal;
-       cputime_t utime, stime;
+       u64 utime, stime;
        struct task_struct *t;
        unsigned int seq, nextseq;
        unsigned long flags;
@@ -379,8 +360,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
                                         struct rq *rq, int ticks)
 {
-       u64 cputime = (__force u64) cputime_one_jiffy * ticks;
-       cputime_t other;
+       u64 other, cputime = TICK_NSEC * ticks;
 
        /*
         * When returning from idle, many ticks can get accounted at
@@ -392,6 +372,7 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
        other = account_other_time(ULONG_MAX);
        if (other >= cputime)
                return;
+
        cputime -= other;
 
        if (this_cpu_ksoftirqd() == p) {
@@ -400,7 +381,7 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
                 * So, we have to handle it separately here.
                 * Also, p->stime needs to be updated for ksoftirqd.
                 */
-               __account_system_time(p, cputime, CPUTIME_SOFTIRQ);
+               account_system_index_time(p, cputime, CPUTIME_SOFTIRQ);
        } else if (user_tick) {
                account_user_time(p, cputime);
        } else if (p == rq->idle) {
@@ -408,7 +389,7 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
        } else if (p->flags & PF_VCPU) { /* System time or guest time */
                account_guest_time(p, cputime);
        } else {
-               __account_system_time(p, cputime, CPUTIME_SYSTEM);
+               account_system_index_time(p, cputime, CPUTIME_SYSTEM);
        }
 }
 
@@ -437,9 +418,7 @@ void vtime_common_task_switch(struct task_struct *prev)
        else
                vtime_account_system(prev);
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-       vtime_account_user(prev);
-#endif
+       vtime_flush(prev);
        arch_vtime_task_switch(prev);
 }
 #endif
@@ -467,14 +446,14 @@ void vtime_account_irq_enter(struct task_struct *tsk)
 EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
 #endif /* __ARCH_HAS_VTIME_ACCOUNT */
 
-void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
+void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
 {
        *ut = p->utime;
        *st = p->stime;
 }
 EXPORT_SYMBOL_GPL(task_cputime_adjusted);
 
-void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
+void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
 {
        struct task_cputime cputime;
 
@@ -491,7 +470,7 @@ void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime
  */
 void account_process_tick(struct task_struct *p, int user_tick)
 {
-       cputime_t cputime, steal;
+       u64 cputime, steal;
        struct rq *rq = this_rq();
 
        if (vtime_accounting_cpu_enabled())
@@ -502,7 +481,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
                return;
        }
 
-       cputime = cputime_one_jiffy;
+       cputime = TICK_NSEC;
        steal = steal_account_process_time(ULONG_MAX);
 
        if (steal >= cputime)
@@ -524,14 +503,14 @@ void account_process_tick(struct task_struct *p, int user_tick)
  */
 void account_idle_ticks(unsigned long ticks)
 {
-       cputime_t cputime, steal;
+       u64 cputime, steal;
 
        if (sched_clock_irqtime) {
                irqtime_account_idle_ticks(ticks);
                return;
        }
 
-       cputime = jiffies_to_cputime(ticks);
+       cputime = ticks * TICK_NSEC;
        steal = steal_account_process_time(ULONG_MAX);
 
        if (steal >= cputime)
@@ -545,7 +524,7 @@ void account_idle_ticks(unsigned long ticks)
  * Perform (stime * rtime) / total, but avoid multiplication overflow by
  * losing precision when the numbers are big.
  */
-static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
+static u64 scale_stime(u64 stime, u64 rtime, u64 total)
 {
        u64 scaled;
 
@@ -582,7 +561,7 @@ drop_precision:
         * followed by a 64/32->64 divide.
         */
        scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
-       return (__force cputime_t) scaled;
+       return scaled;
 }
 
 /*
@@ -607,14 +586,14 @@ drop_precision:
  */
 static void cputime_adjust(struct task_cputime *curr,
                           struct prev_cputime *prev,
-                          cputime_t *ut, cputime_t *st)
+                          u64 *ut, u64 *st)
 {
-       cputime_t rtime, stime, utime;
+       u64 rtime, stime, utime;
        unsigned long flags;
 
        /* Serialize concurrent callers such that we can honour our guarantees */
        raw_spin_lock_irqsave(&prev->lock, flags);
-       rtime = nsecs_to_cputime(curr->sum_exec_runtime);
+       rtime = curr->sum_exec_runtime;
 
        /*
         * This is possible under two circumstances:
@@ -645,8 +624,7 @@ static void cputime_adjust(struct task_cputime *curr,
                goto update;
        }
 
-       stime = scale_stime((__force u64)stime, (__force u64)rtime,
-                           (__force u64)(stime + utime));
+       stime = scale_stime(stime, rtime, stime + utime);
 
 update:
        /*
@@ -679,7 +657,7 @@ out:
        raw_spin_unlock_irqrestore(&prev->lock, flags);
 }
 
-void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
+void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
 {
        struct task_cputime cputime = {
                .sum_exec_runtime = p->se.sum_exec_runtime,
@@ -690,7 +668,7 @@ void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
 }
 EXPORT_SYMBOL_GPL(task_cputime_adjusted);
 
-void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
+void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
 {
        struct task_cputime cputime;
 
@@ -700,20 +678,20 @@ void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime
 #endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-static cputime_t vtime_delta(struct task_struct *tsk)
+static u64 vtime_delta(struct task_struct *tsk)
 {
        unsigned long now = READ_ONCE(jiffies);
 
        if (time_before(now, (unsigned long)tsk->vtime_snap))
                return 0;
 
-       return jiffies_to_cputime(now - tsk->vtime_snap);
+       return jiffies_to_nsecs(now - tsk->vtime_snap);
 }
 
-static cputime_t get_vtime_delta(struct task_struct *tsk)
+static u64 get_vtime_delta(struct task_struct *tsk)
 {
        unsigned long now = READ_ONCE(jiffies);
-       cputime_t delta, other;
+       u64 delta, other;
 
        /*
         * Unlike tick based timing, vtime based timing never has lost
@@ -722,7 +700,7 @@ static cputime_t get_vtime_delta(struct task_struct *tsk)
         * elapsed time. Limit account_other_time to prevent rounding
         * errors from causing elapsed vtime to go negative.
         */
-       delta = jiffies_to_cputime(now - tsk->vtime_snap);
+       delta = jiffies_to_nsecs(now - tsk->vtime_snap);
        other = account_other_time(delta);
        WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE);
        tsk->vtime_snap = now;
@@ -732,9 +710,7 @@ static cputime_t get_vtime_delta(struct task_struct *tsk)
 
 static void __vtime_account_system(struct task_struct *tsk)
 {
-       cputime_t delta_cpu = get_vtime_delta(tsk);
-
-       account_system_time(tsk, irq_count(), delta_cpu);
+       account_system_time(tsk, irq_count(), get_vtime_delta(tsk));
 }
 
 void vtime_account_system(struct task_struct *tsk)
@@ -749,14 +725,10 @@ void vtime_account_system(struct task_struct *tsk)
 
 void vtime_account_user(struct task_struct *tsk)
 {
-       cputime_t delta_cpu;
-
        write_seqcount_begin(&tsk->vtime_seqcount);
        tsk->vtime_snap_whence = VTIME_SYS;
-       if (vtime_delta(tsk)) {
-               delta_cpu = get_vtime_delta(tsk);
-               account_user_time(tsk, delta_cpu);
-       }
+       if (vtime_delta(tsk))
+               account_user_time(tsk, get_vtime_delta(tsk));
        write_seqcount_end(&tsk->vtime_seqcount);
 }
 
@@ -797,9 +769,7 @@ EXPORT_SYMBOL_GPL(vtime_guest_exit);
 
 void vtime_account_idle(struct task_struct *tsk)
 {
-       cputime_t delta_cpu = get_vtime_delta(tsk);
-
-       account_idle_time(delta_cpu);
+       account_idle_time(get_vtime_delta(tsk));
 }
 
 void arch_vtime_task_switch(struct task_struct *prev)
@@ -826,10 +796,10 @@ void vtime_init_idle(struct task_struct *t, int cpu)
        local_irq_restore(flags);
 }
 
-cputime_t task_gtime(struct task_struct *t)
+u64 task_gtime(struct task_struct *t)
 {
        unsigned int seq;
-       cputime_t gtime;
+       u64 gtime;
 
        if (!vtime_accounting_enabled())
                return t->gtime;
@@ -851,9 +821,9 @@ cputime_t task_gtime(struct task_struct *t)
  * add up the pending nohz execution time since the last
  * cputime snapshot.
  */
-void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
+void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
 {
-       cputime_t delta;
+       u64 delta;
        unsigned int seq;
 
        if (!vtime_accounting_enabled()) {
index 70ef2b1901e4914ca47e0a613a8b817ec452fa7f..27737f34757d38751b39901e36409095c0be9c56 100644 (file)
@@ -663,9 +663,9 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
                 * Nothing relies on rq->lock after this, so it's safe to drop
                 * rq->lock.
                 */
-               lockdep_unpin_lock(&rq->lock, rf.cookie);
+               rq_unpin_lock(rq, &rf);
                push_dl_task(rq);
-               lockdep_repin_lock(&rq->lock, rf.cookie);
+               rq_repin_lock(rq, &rf);
        }
 #endif
 
@@ -1118,7 +1118,7 @@ static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
 }
 
 struct task_struct *
-pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
+pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
        struct sched_dl_entity *dl_se;
        struct task_struct *p;
@@ -1133,9 +1133,9 @@ pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct pin_cookie coo
                 * disabled avoiding further scheduler activity on it and we're
                 * being very careful to re-start the picking loop.
                 */
-               lockdep_unpin_lock(&rq->lock, cookie);
+               rq_unpin_lock(rq, rf);
                pull_dl_task(rq);
-               lockdep_repin_lock(&rq->lock, cookie);
+               rq_repin_lock(rq, rf);
                /*
                 * pull_dl_task() can drop (and re-acquire) rq->lock; this
                 * means a stop task can slip in, in which case we need to
@@ -1729,12 +1729,11 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
 #ifdef CONFIG_SMP
                if (tsk_nr_cpus_allowed(p) > 1 && rq->dl.overloaded)
                        queue_push_tasks(rq);
-#else
+#endif
                if (dl_task(rq->curr))
                        check_preempt_curr_dl(rq, p, 0);
                else
                        resched_curr(rq);
-#endif
        }
 }
 
index fa178b62ea79b53e3cbf37d78d65699e145d6b98..109adc0e9cb990d639fed48815890aed5d86e857 100644 (file)
@@ -953,6 +953,10 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 #endif
        P(policy);
        P(prio);
+       if (p->policy == SCHED_DEADLINE) {
+               P(dl.runtime);
+               P(dl.deadline);
+       }
 #undef PN_SCHEDSTAT
 #undef PN
 #undef __PN
index 6559d197e08a5be3809a2176c8d2fdb52b38389d..274c747a01ce4862307f4a97286db68e6a753824 100644 (file)
@@ -2657,6 +2657,18 @@ static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
        if (tg_weight)
                shares /= tg_weight;
 
+       /*
+        * MIN_SHARES has to be unscaled here to support per-CPU partitioning
+        * of a group with small tg->shares value. It is a floor value which is
+        * assigned as a minimum load.weight to the sched_entity representing
+        * the group on a CPU.
+        *
+        * E.g. on 64-bit for a group with tg->shares of scale_load(15)=15*1024
+        * on an 8-core system with 8 tasks each runnable on one CPU shares has
+        * to be 15*1024*1/8=1920 instead of scale_load(MIN_SHARES)=2*1024. In
+        * case no task is runnable on a CPU MIN_SHARES=2 should be returned
+        * instead of 0.
+        */
        if (shares < MIN_SHARES)
                shares = MIN_SHARES;
        if (shares > tg->shares)
@@ -2689,16 +2701,20 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 
 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
 
-static void update_cfs_shares(struct cfs_rq *cfs_rq)
+static void update_cfs_shares(struct sched_entity *se)
 {
+       struct cfs_rq *cfs_rq = group_cfs_rq(se);
        struct task_group *tg;
-       struct sched_entity *se;
        long shares;
 
-       tg = cfs_rq->tg;
-       se = tg->se[cpu_of(rq_of(cfs_rq))];
-       if (!se || throttled_hierarchy(cfs_rq))
+       if (!cfs_rq)
+               return;
+
+       if (throttled_hierarchy(cfs_rq))
                return;
+
+       tg = cfs_rq->tg;
+
 #ifndef CONFIG_SMP
        if (likely(se->load.weight == tg->shares))
                return;
@@ -2707,8 +2723,9 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq)
 
        reweight_entity(cfs_rq_of(se), se, shares);
 }
+
 #else /* CONFIG_FAIR_GROUP_SCHED */
-static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
+static inline void update_cfs_shares(struct sched_entity *se)
 {
 }
 #endif /* CONFIG_FAIR_GROUP_SCHED */
@@ -3424,7 +3441,7 @@ static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
        return cfs_rq->avg.load_avg;
 }
 
-static int idle_balance(struct rq *this_rq);
+static int idle_balance(struct rq *this_rq, struct rq_flags *rf);
 
 #else /* CONFIG_SMP */
 
@@ -3453,7 +3470,7 @@ attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 static inline void
 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 
-static inline int idle_balance(struct rq *rq)
+static inline int idle_balance(struct rq *rq, struct rq_flags *rf)
 {
        return 0;
 }
@@ -3582,10 +3599,18 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
        if (renorm && !curr)
                se->vruntime += cfs_rq->min_vruntime;
 
+       /*
+        * When enqueuing a sched_entity, we must:
+        *   - Update loads to have both entity and cfs_rq synced with now.
+        *   - Add its load to cfs_rq->runnable_avg
+        *   - For group_entity, update its weight to reflect the new share of
+        *     its group cfs_rq
+        *   - Add its new weight to cfs_rq->load.weight
+        */
        update_load_avg(se, UPDATE_TG);
        enqueue_entity_load_avg(cfs_rq, se);
+       update_cfs_shares(se);
        account_entity_enqueue(cfs_rq, se);
-       update_cfs_shares(cfs_rq);
 
        if (flags & ENQUEUE_WAKEUP)
                place_entity(cfs_rq, se, 0);
@@ -3657,6 +3682,15 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
         * Update run-time statistics of the 'current'.
         */
        update_curr(cfs_rq);
+
+       /*
+        * When dequeuing a sched_entity, we must:
+        *   - Update loads to have both entity and cfs_rq synced with now.
+        *   - Subtract its load from the cfs_rq->runnable_avg.
+        *   - Subtract its previous weight from cfs_rq->load.weight.
+        *   - For group entity, update its weight to reflect the new share
+        *     of its group cfs_rq.
+        */
        update_load_avg(se, UPDATE_TG);
        dequeue_entity_load_avg(cfs_rq, se);
 
@@ -3681,7 +3715,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
        /* return excess runtime on last dequeue */
        return_cfs_rq_runtime(cfs_rq);
 
-       update_cfs_shares(cfs_rq);
+       update_cfs_shares(se);
 
        /*
         * Now advance min_vruntime if @se was the entity holding it back,
@@ -3864,7 +3898,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
         * Ensure that runnable average is periodically updated.
         */
        update_load_avg(curr, UPDATE_TG);
-       update_cfs_shares(cfs_rq);
+       update_cfs_shares(curr);
 
 #ifdef CONFIG_SCHED_HRTICK
        /*
@@ -4761,7 +4795,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                        break;
 
                update_load_avg(se, UPDATE_TG);
-               update_cfs_shares(cfs_rq);
+               update_cfs_shares(se);
        }
 
        if (!se)
@@ -4820,7 +4854,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                        break;
 
                update_load_avg(se, UPDATE_TG);
-               update_cfs_shares(cfs_rq);
+               update_cfs_shares(se);
        }
 
        if (!se)
@@ -6213,7 +6247,7 @@ preempt:
 }
 
 static struct task_struct *
-pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
+pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
        struct cfs_rq *cfs_rq = &rq->cfs;
        struct sched_entity *se;
@@ -6320,15 +6354,8 @@ simple:
        return p;
 
 idle:
-       /*
-        * This is OK, because current is on_cpu, which avoids it being picked
-        * for load-balance and preemption/IRQs are still disabled avoiding
-        * further scheduler activity on it and we're being very careful to
-        * re-start the picking loop.
-        */
-       lockdep_unpin_lock(&rq->lock, cookie);
-       new_tasks = idle_balance(rq);
-       lockdep_repin_lock(&rq->lock, cookie);
+       new_tasks = idle_balance(rq, rf);
+
        /*
         * Because idle_balance() releases (and re-acquires) rq->lock, it is
         * possible for any higher priority task to appear. In that case we
@@ -8077,6 +8104,7 @@ redo:
 
 more_balance:
                raw_spin_lock_irqsave(&busiest->lock, flags);
+               update_rq_clock(busiest);
 
                /*
                 * cur_ld_moved - load moved in current iteration
@@ -8297,7 +8325,7 @@ update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
  */
-static int idle_balance(struct rq *this_rq)
+static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
 {
        unsigned long next_balance = jiffies + HZ;
        int this_cpu = this_rq->cpu;
@@ -8311,6 +8339,14 @@ static int idle_balance(struct rq *this_rq)
         */
        this_rq->idle_stamp = rq_clock(this_rq);
 
+       /*
+        * This is OK, because current is on_cpu, which avoids it being picked
+        * for load-balance and preemption/IRQs are still disabled avoiding
+        * further scheduler activity on it and we're being very careful to
+        * re-start the picking loop.
+        */
+       rq_unpin_lock(this_rq, rf);
+
        if (this_rq->avg_idle < sysctl_sched_migration_cost ||
            !this_rq->rd->overload) {
                rcu_read_lock();
@@ -8388,6 +8424,8 @@ out:
        if (pulled_task)
                this_rq->idle_stamp = 0;
 
+       rq_repin_lock(this_rq, rf);
+
        return pulled_task;
 }
 
@@ -8443,6 +8481,7 @@ static int active_load_balance_cpu_stop(void *data)
                };
 
                schedstat_inc(sd->alb_count);
+               update_rq_clock(busiest_rq);
 
                p = detach_one_task(&env);
                if (p) {
@@ -9264,6 +9303,7 @@ void online_fair_sched_group(struct task_group *tg)
                se = tg->se[i];
 
                raw_spin_lock_irq(&rq->lock);
+               update_rq_clock(rq);
                attach_entity_cfs_rq(se);
                sync_throttle(tg, i);
                raw_spin_unlock_irq(&rq->lock);
@@ -9356,8 +9396,10 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 
                /* Possible calls to update_curr() need rq clock */
                update_rq_clock(rq);
-               for_each_sched_entity(se)
-                       update_cfs_shares(group_cfs_rq(se));
+               for_each_sched_entity(se) {
+                       update_load_avg(se, UPDATE_TG);
+                       update_cfs_shares(se);
+               }
                raw_spin_unlock_irqrestore(&rq->lock, flags);
        }
 
index 5405d3feb112f54e3b4dabecbe573d59fee7a160..0c00172db63e7fa767179c8d6ac31c7573d5870d 100644 (file)
@@ -24,7 +24,7 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
 }
 
 static struct task_struct *
-pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
+pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
        put_prev_task(rq, prev);
        update_idle_core(rq);
index 2516b8df6dbbd8c199854c76a9aa27c0126ba702..e8836cfc4cdbeef14564cea1faa6c6f0f4e4457a 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/irq_work.h>
 
 int sched_rr_timeslice = RR_TIMESLICE;
+int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
 
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
 
@@ -1523,7 +1524,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
 }
 
 static struct task_struct *
-pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
+pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
        struct task_struct *p;
        struct rt_rq *rt_rq = &rq->rt;
@@ -1535,9 +1536,9 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct pin_cookie coo
                 * disabled avoiding further scheduler activity on it and we're
                 * being very careful to re-start the picking loop.
                 */
-               lockdep_unpin_lock(&rq->lock, cookie);
+               rq_unpin_lock(rq, rf);
                pull_rt_task(rq);
-               lockdep_repin_lock(&rq->lock, cookie);
+               rq_repin_lock(rq, rf);
                /*
                 * pull_rt_task() can drop (and re-acquire) rq->lock; this
                 * means a dl or stop task can slip in, in which case we need
@@ -2198,10 +2199,9 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 #ifdef CONFIG_SMP
                if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded)
                        queue_push_tasks(rq);
-#else
+#endif /* CONFIG_SMP */
                if (p->prio < rq->curr->prio)
                        resched_curr(rq);
-#endif /* CONFIG_SMP */
        }
 }
 
@@ -2246,6 +2246,7 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
        }
 }
 
+#ifdef CONFIG_POSIX_TIMERS
 static void watchdog(struct rq *rq, struct task_struct *p)
 {
        unsigned long soft, hard;
@@ -2267,6 +2268,9 @@ static void watchdog(struct rq *rq, struct task_struct *p)
                        p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
        }
 }
+#else
+static inline void watchdog(struct rq *rq, struct task_struct *p) { }
+#endif
 
 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 {
index 7b34c7826ca5952be8701b58205c7b481cad11d2..71b10a9b73cfe290b2545dfd3cd10a49d0af2cc9 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/sched/rt.h>
 #include <linux/u64_stats_sync.h>
 #include <linux/sched/deadline.h>
+#include <linux/kernel_stat.h>
 #include <linux/binfmts.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
@@ -222,7 +223,7 @@ bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
               dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
 }
 
-extern struct mutex sched_domains_mutex;
+extern void init_dl_bw(struct dl_bw *dl_b);
 
 #ifdef CONFIG_CGROUP_SCHED
 
@@ -583,6 +584,13 @@ struct root_domain {
 };
 
 extern struct root_domain def_root_domain;
+extern struct mutex sched_domains_mutex;
+extern cpumask_var_t fallback_doms;
+extern cpumask_var_t sched_domains_tmpmask;
+
+extern void init_defrootdomain(void);
+extern int init_sched_domains(const struct cpumask *cpu_map);
+extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
 
 #endif /* CONFIG_SMP */
 
@@ -644,7 +652,7 @@ struct rq {
        unsigned long next_balance;
        struct mm_struct *prev_mm;
 
-       unsigned int clock_skip_update;
+       unsigned int clock_update_flags;
        u64 clock;
        u64 clock_task;
 
@@ -768,28 +776,110 @@ static inline u64 __rq_clock_broken(struct rq *rq)
        return READ_ONCE(rq->clock);
 }
 
+/*
+ * rq::clock_update_flags bits
+ *
+ * %RQCF_REQ_SKIP - will request skipping of clock update on the next
+ *  call to __schedule(). This is an optimisation to avoid
+ *  neighbouring rq clock updates.
+ *
+ * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
+ *  in effect and calls to update_rq_clock() are being ignored.
+ *
+ * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
+ *  made to update_rq_clock() since the last time rq::lock was pinned.
+ *
+ * If inside of __schedule(), clock_update_flags will have been
+ * shifted left (a left shift is a cheap operation for the fast path
+ * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use,
+ *
+ *     if (rq->clock_update_flags >= RQCF_UPDATED)
+ *
+ * to check if %RQCF_UPDATED is set. It'll never be shifted more than
+ * one position though, because the next rq_unpin_lock() will shift it
+ * back.
+ */
+#define RQCF_REQ_SKIP  0x01
+#define RQCF_ACT_SKIP  0x02
+#define RQCF_UPDATED   0x04
+
+static inline void assert_clock_updated(struct rq *rq)
+{
+       /*
+        * The only reason for not seeing a clock update since the
+        * last rq_pin_lock() is if we're currently skipping updates.
+        */
+       SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
+}
+
 static inline u64 rq_clock(struct rq *rq)
 {
        lockdep_assert_held(&rq->lock);
+       assert_clock_updated(rq);
+
        return rq->clock;
 }
 
 static inline u64 rq_clock_task(struct rq *rq)
 {
        lockdep_assert_held(&rq->lock);
+       assert_clock_updated(rq);
+
        return rq->clock_task;
 }
 
-#define RQCF_REQ_SKIP  0x01
-#define RQCF_ACT_SKIP  0x02
-
 static inline void rq_clock_skip_update(struct rq *rq, bool skip)
 {
        lockdep_assert_held(&rq->lock);
        if (skip)
-               rq->clock_skip_update |= RQCF_REQ_SKIP;
+               rq->clock_update_flags |= RQCF_REQ_SKIP;
        else
-               rq->clock_skip_update &= ~RQCF_REQ_SKIP;
+               rq->clock_update_flags &= ~RQCF_REQ_SKIP;
+}
+
+struct rq_flags {
+       unsigned long flags;
+       struct pin_cookie cookie;
+#ifdef CONFIG_SCHED_DEBUG
+       /*
+        * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the
+        * current pin context is stashed here in case it needs to be
+        * restored in rq_repin_lock().
+        */
+       unsigned int clock_update_flags;
+#endif
+};
+
+static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
+{
+       rf->cookie = lockdep_pin_lock(&rq->lock);
+
+#ifdef CONFIG_SCHED_DEBUG
+       rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
+       rf->clock_update_flags = 0;
+#endif
+}
+
+static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
+{
+#ifdef CONFIG_SCHED_DEBUG
+       if (rq->clock_update_flags > RQCF_ACT_SKIP)
+               rf->clock_update_flags = RQCF_UPDATED;
+#endif
+
+       lockdep_unpin_lock(&rq->lock, rf->cookie);
+}
+
+static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
+{
+       lockdep_repin_lock(&rq->lock, rf->cookie);
+
+#ifdef CONFIG_SCHED_DEBUG
+       /*
+        * Restore the value we stashed in @rf for this pin context.
+        */
+       rq->clock_update_flags |= rf->clock_update_flags;
+#endif
 }
 
 #ifdef CONFIG_NUMA
@@ -803,6 +893,16 @@ extern int sched_max_numa_distance;
 extern bool find_numa_distance(int distance);
 #endif
 
+#ifdef CONFIG_NUMA
+extern void sched_init_numa(void);
+extern void sched_domains_numa_masks_set(unsigned int cpu);
+extern void sched_domains_numa_masks_clear(unsigned int cpu);
+#else
+static inline void sched_init_numa(void) { }
+static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
+static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
+#endif
+
 #ifdef CONFIG_NUMA_BALANCING
 /* The regions in numa_faults array from task_struct */
 enum numa_faults_stats {
@@ -969,7 +1069,7 @@ static inline void sched_ttwu_pending(void) { }
 #endif /* CONFIG_SMP */
 
 #include "stats.h"
-#include "auto_group.h"
+#include "autogroup.h"
 
 #ifdef CONFIG_CGROUP_SCHED
 
@@ -1245,7 +1345,7 @@ struct sched_class {
         */
        struct task_struct * (*pick_next_task) (struct rq *rq,
                                                struct task_struct *prev,
-                                               struct pin_cookie cookie);
+                                               struct rq_flags *rf);
        void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
@@ -1501,11 +1601,6 @@ static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
 static inline void sched_avg_update(struct rq *rq) { }
 #endif
 
-struct rq_flags {
-       unsigned long flags;
-       struct pin_cookie cookie;
-};
-
 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
        __acquires(rq->lock);
 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
@@ -1515,7 +1610,7 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
        __releases(rq->lock)
 {
-       lockdep_unpin_lock(&rq->lock, rf->cookie);
+       rq_unpin_lock(rq, rf);
        raw_spin_unlock(&rq->lock);
 }
 
@@ -1524,7 +1619,7 @@ task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
        __releases(rq->lock)
        __releases(p->pi_lock)
 {
-       lockdep_unpin_lock(&rq->lock, rf->cookie);
+       rq_unpin_lock(rq, rf);
        raw_spin_unlock(&rq->lock);
        raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
 }
@@ -1674,6 +1769,10 @@ static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
                __release(rq2->lock);
 }
 
+extern void set_rq_online (struct rq *rq);
+extern void set_rq_offline(struct rq *rq);
+extern bool sched_smp_initialized;
+
 #else /* CONFIG_SMP */
 
 /*
@@ -1750,8 +1849,7 @@ static inline void nohz_balance_exit_idle(unsigned int cpu) { }
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 struct irqtime {
-       u64                     hardirq_time;
-       u64                     softirq_time;
+       u64                     tick_delta;
        u64                     irq_start_time;
        struct u64_stats_sync   sync;
 };
@@ -1761,12 +1859,13 @@ DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
 static inline u64 irq_time_read(int cpu)
 {
        struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
+       u64 *cpustat = kcpustat_cpu(cpu).cpustat;
        unsigned int seq;
        u64 total;
 
        do {
                seq = __u64_stats_fetch_begin(&irqtime->sync);
-               total = irqtime->softirq_time + irqtime->hardirq_time;
+               total = cpustat[CPUTIME_SOFTIRQ] + cpustat[CPUTIME_IRQ];
        } while (__u64_stats_fetch_retry(&irqtime->sync, seq));
 
        return total;
index 34659a853505bec33603c423cd0ff71b989410a6..bf0da0aa0a14432e4c82a8a9aeda513301fd9de7 100644 (file)
@@ -172,18 +172,19 @@ sched_info_switch(struct rq *rq,
  */
 
 /**
- * cputimer_running - return true if cputimer is running
+ * get_running_cputimer - return &tsk->signal->cputimer if cputimer is running
  *
  * @tsk:       Pointer to target task.
  */
-static inline bool cputimer_running(struct task_struct *tsk)
-
+#ifdef CONFIG_POSIX_TIMERS
+static inline
+struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk)
 {
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
 
        /* Check if cputimer isn't running. This is accessed without locking. */
        if (!READ_ONCE(cputimer->running))
-               return false;
+               return NULL;
 
        /*
         * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
@@ -200,10 +201,17 @@ static inline bool cputimer_running(struct task_struct *tsk)
         * clock delta is behind the expiring timer value.
         */
        if (unlikely(!tsk->sighand))
-               return false;
+               return NULL;
 
-       return true;
+       return cputimer;
+}
+#else
+static inline
+struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk)
+{
+       return NULL;
 }
+#endif
 
 /**
  * account_group_user_time - Maintain utime for a thread group.
@@ -216,11 +224,11 @@ static inline bool cputimer_running(struct task_struct *tsk)
  * running CPU and update the utime field there.
  */
 static inline void account_group_user_time(struct task_struct *tsk,
-                                          cputime_t cputime)
+                                          u64 cputime)
 {
-       struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
+       struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);
 
-       if (!cputimer_running(tsk))
+       if (!cputimer)
                return;
 
        atomic64_add(cputime, &cputimer->cputime_atomic.utime);
@@ -237,11 +245,11 @@ static inline void account_group_user_time(struct task_struct *tsk,
  * running CPU and update the stime field there.
  */
 static inline void account_group_system_time(struct task_struct *tsk,
-                                            cputime_t cputime)
+                                            u64 cputime)
 {
-       struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
+       struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);
 
-       if (!cputimer_running(tsk))
+       if (!cputimer)
                return;
 
        atomic64_add(cputime, &cputimer->cputime_atomic.stime);
@@ -260,9 +268,9 @@ static inline void account_group_system_time(struct task_struct *tsk,
 static inline void account_group_exec_runtime(struct task_struct *tsk,
                                              unsigned long long ns)
 {
-       struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
+       struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);
 
-       if (!cputimer_running(tsk))
+       if (!cputimer)
                return;
 
        atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime);
index 604297a08b3ae3064f990ec3c0b3a38384bb4f00..9f69fb6308537b2b7e6b9ba47f6cb17fd17d97ce 100644 (file)
@@ -24,7 +24,7 @@ check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
 }
 
 static struct task_struct *
-pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
+pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
        struct task_struct *stop = rq->stop;
 
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
new file mode 100644 (file)
index 0000000..1b0b4fb
--- /dev/null
@@ -0,0 +1,1658 @@
+/*
+ * Scheduler topology setup/handling methods
+ */
+#include <linux/sched.h>
+#include <linux/mutex.h>
+
+#include "sched.h"
+
+DEFINE_MUTEX(sched_domains_mutex);
+
+/* Protected by sched_domains_mutex: */
+cpumask_var_t sched_domains_tmpmask;
+
+#ifdef CONFIG_SCHED_DEBUG
+
+static __read_mostly int sched_debug_enabled;
+
+static int __init sched_debug_setup(char *str)
+{
+       sched_debug_enabled = 1;
+
+       return 0;
+}
+early_param("sched_debug", sched_debug_setup);
+
+static inline bool sched_debug(void)
+{
+       return sched_debug_enabled;
+}
+
+static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
+                                 struct cpumask *groupmask)
+{
+       struct sched_group *group = sd->groups;
+
+       cpumask_clear(groupmask);
+
+       printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
+
+       if (!(sd->flags & SD_LOAD_BALANCE)) {
+               printk("does not load-balance\n");
+               if (sd->parent)
+                       printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
+                                       " has parent");
+               return -1;
+       }
+
+       printk(KERN_CONT "span %*pbl level %s\n",
+              cpumask_pr_args(sched_domain_span(sd)), sd->name);
+
+       if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
+               printk(KERN_ERR "ERROR: domain->span does not contain "
+                               "CPU%d\n", cpu);
+       }
+       if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
+               printk(KERN_ERR "ERROR: domain->groups does not contain"
+                               " CPU%d\n", cpu);
+       }
+
+       printk(KERN_DEBUG "%*s groups:", level + 1, "");
+       do {
+               if (!group) {
+                       printk("\n");
+                       printk(KERN_ERR "ERROR: group is NULL\n");
+                       break;
+               }
+
+               if (!cpumask_weight(sched_group_cpus(group))) {
+                       printk(KERN_CONT "\n");
+                       printk(KERN_ERR "ERROR: empty group\n");
+                       break;
+               }
+
+               if (!(sd->flags & SD_OVERLAP) &&
+                   cpumask_intersects(groupmask, sched_group_cpus(group))) {
+                       printk(KERN_CONT "\n");
+                       printk(KERN_ERR "ERROR: repeated CPUs\n");
+                       break;
+               }
+
+               cpumask_or(groupmask, groupmask, sched_group_cpus(group));
+
+               printk(KERN_CONT " %*pbl",
+                      cpumask_pr_args(sched_group_cpus(group)));
+               if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
+                       printk(KERN_CONT " (cpu_capacity = %lu)",
+                               group->sgc->capacity);
+               }
+
+               group = group->next;
+       } while (group != sd->groups);
+       printk(KERN_CONT "\n");
+
+       if (!cpumask_equal(sched_domain_span(sd), groupmask))
+               printk(KERN_ERR "ERROR: groups don't span domain->span\n");
+
+       if (sd->parent &&
+           !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
+               printk(KERN_ERR "ERROR: parent span is not a superset "
+                       "of domain->span\n");
+       return 0;
+}
+
+static void sched_domain_debug(struct sched_domain *sd, int cpu)
+{
+       int level = 0;
+
+       if (!sched_debug_enabled)
+               return;
+
+       if (!sd) {
+               printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
+               return;
+       }
+
+       printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
+
+       for (;;) {
+               if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
+                       break;
+               level++;
+               sd = sd->parent;
+               if (!sd)
+                       break;
+       }
+}
+#else /* !CONFIG_SCHED_DEBUG */
+
+# define sched_debug_enabled 0
+# define sched_domain_debug(sd, cpu) do { } while (0)
+static inline bool sched_debug(void)
+{
+       return false;
+}
+#endif /* CONFIG_SCHED_DEBUG */
+
+static int sd_degenerate(struct sched_domain *sd)
+{
+       if (cpumask_weight(sched_domain_span(sd)) == 1)
+               return 1;
+
+       /* Following flags need at least 2 groups */
+       if (sd->flags & (SD_LOAD_BALANCE |
+                        SD_BALANCE_NEWIDLE |
+                        SD_BALANCE_FORK |
+                        SD_BALANCE_EXEC |
+                        SD_SHARE_CPUCAPACITY |
+                        SD_ASYM_CPUCAPACITY |
+                        SD_SHARE_PKG_RESOURCES |
+                        SD_SHARE_POWERDOMAIN)) {
+               if (sd->groups != sd->groups->next)
+                       return 0;
+       }
+
+       /* Following flags don't use groups */
+       if (sd->flags & (SD_WAKE_AFFINE))
+               return 0;
+
+       return 1;
+}
+
+static int
+sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
+{
+       unsigned long cflags = sd->flags, pflags = parent->flags;
+
+       if (sd_degenerate(parent))
+               return 1;
+
+       if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
+               return 0;
+
+       /* Flags needing groups don't count if only 1 group in parent */
+       if (parent->groups == parent->groups->next) {
+               pflags &= ~(SD_LOAD_BALANCE |
+                               SD_BALANCE_NEWIDLE |
+                               SD_BALANCE_FORK |
+                               SD_BALANCE_EXEC |
+                               SD_ASYM_CPUCAPACITY |
+                               SD_SHARE_CPUCAPACITY |
+                               SD_SHARE_PKG_RESOURCES |
+                               SD_PREFER_SIBLING |
+                               SD_SHARE_POWERDOMAIN);
+               if (nr_node_ids == 1)
+                       pflags &= ~SD_SERIALIZE;
+       }
+       if (~cflags & pflags)
+               return 0;
+
+       return 1;
+}
+
+static void free_rootdomain(struct rcu_head *rcu)
+{
+       struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
+
+       cpupri_cleanup(&rd->cpupri);
+       cpudl_cleanup(&rd->cpudl);
+       free_cpumask_var(rd->dlo_mask);
+       free_cpumask_var(rd->rto_mask);
+       free_cpumask_var(rd->online);
+       free_cpumask_var(rd->span);
+       kfree(rd);
+}
+
+void rq_attach_root(struct rq *rq, struct root_domain *rd)
+{
+       struct root_domain *old_rd = NULL;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&rq->lock, flags);
+
+       if (rq->rd) {
+               old_rd = rq->rd;
+
+               if (cpumask_test_cpu(rq->cpu, old_rd->online))
+                       set_rq_offline(rq);
+
+               cpumask_clear_cpu(rq->cpu, old_rd->span);
+
+               /*
+                * If we don't want to free the old_rd yet then
+                * set old_rd to NULL to skip the freeing later
+                * in this function:
+                */
+               if (!atomic_dec_and_test(&old_rd->refcount))
+                       old_rd = NULL;
+       }
+
+       atomic_inc(&rd->refcount);
+       rq->rd = rd;
+
+       cpumask_set_cpu(rq->cpu, rd->span);
+       if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
+               set_rq_online(rq);
+
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+       if (old_rd)
+               call_rcu_sched(&old_rd->rcu, free_rootdomain);
+}
+
+static int init_rootdomain(struct root_domain *rd)
+{
+       memset(rd, 0, sizeof(*rd));
+
+       if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
+               goto out;
+       if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
+               goto free_span;
+       if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
+               goto free_online;
+       if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
+               goto free_dlo_mask;
+
+       init_dl_bw(&rd->dl_bw);
+       if (cpudl_init(&rd->cpudl) != 0)
+               goto free_rto_mask;
+
+       if (cpupri_init(&rd->cpupri) != 0)
+               goto free_cpudl;
+       return 0;
+
+free_cpudl:
+       cpudl_cleanup(&rd->cpudl);
+free_rto_mask:
+       free_cpumask_var(rd->rto_mask);
+free_dlo_mask:
+       free_cpumask_var(rd->dlo_mask);
+free_online:
+       free_cpumask_var(rd->online);
+free_span:
+       free_cpumask_var(rd->span);
+out:
+       return -ENOMEM;
+}
+
+/*
+ * By default the system creates a single root-domain with all CPUs as
+ * members (mimicking the global state we have today).
+ */
+struct root_domain def_root_domain;
+
+void init_defrootdomain(void)
+{
+       init_rootdomain(&def_root_domain);
+
+       atomic_set(&def_root_domain.refcount, 1);
+}
+
+static struct root_domain *alloc_rootdomain(void)
+{
+       struct root_domain *rd;
+
+       rd = kmalloc(sizeof(*rd), GFP_KERNEL);
+       if (!rd)
+               return NULL;
+
+       if (init_rootdomain(rd) != 0) {
+               kfree(rd);
+               return NULL;
+       }
+
+       return rd;
+}
+
+/*
+ * Free the circular list of sched_groups starting at @sg.  When
+ * @free_sgc is set, also drop a reference on each group's capacity
+ * structure and free it once the last reference is gone.
+ */
+static void free_sched_groups(struct sched_group *sg, int free_sgc)
+{
+       struct sched_group *tmp, *first;
+
+       if (!sg)
+               return;
+
+       first = sg;
+       do {
+               /* Grab ->next before kfree(sg) invalidates it. */
+               tmp = sg->next;
+
+               if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
+                       kfree(sg->sgc);
+
+               kfree(sg);
+               sg = tmp;
+       } while (sg != first);
+}
+
+/*
+ * Free a single sched_domain together with its groups and shared state,
+ * honouring the reference counts on shared groups/sgc/shared data.
+ */
+static void destroy_sched_domain(struct sched_domain *sd)
+{
+       /*
+        * If its an overlapping domain it has private groups, iterate and
+        * nuke them all.
+        */
+       if (sd->flags & SD_OVERLAP) {
+               free_sched_groups(sd->groups, 1);
+       } else if (atomic_dec_and_test(&sd->groups->ref)) {
+               kfree(sd->groups->sgc);
+               kfree(sd->groups);
+       }
+       if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
+               kfree(sd->shared);
+       kfree(sd);
+}
+
+/* RCU callback: tear down @sd and every parent above it. */
+static void destroy_sched_domains_rcu(struct rcu_head *rcu)
+{
+       struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
+
+       while (sd) {
+               /* Save ->parent before destroy_sched_domain() frees sd. */
+               struct sched_domain *parent = sd->parent;
+               destroy_sched_domain(sd);
+               sd = parent;
+       }
+}
+
+/*
+ * Defer destruction of a domain hierarchy until after an RCU grace
+ * period, so concurrent RCU readers can finish traversing it.
+ */
+static void destroy_sched_domains(struct sched_domain *sd)
+{
+       if (sd)
+               call_rcu(&sd->rcu, destroy_sched_domains_rcu);
+}
+
+/*
+ * Keep a special pointer to the highest sched_domain that has
+ * SD_SHARE_PKG_RESOURCES set (Last Level Cache Domain); this
+ * allows us to avoid some pointer chasing in select_idle_sibling().
+ *
+ * Also keep a unique ID per domain (we use the first CPU number in
+ * the cpumask of the domain); this allows us to quickly tell if
+ * two CPUs are in the same cache domain, see cpus_share_cache().
+ */
+DEFINE_PER_CPU(struct sched_domain *, sd_llc);         /* Highest domain sharing the LLC */
+DEFINE_PER_CPU(int, sd_llc_size);                      /* Weight of that domain's span */
+DEFINE_PER_CPU(int, sd_llc_id);                                /* First CPU of that domain's span */
+DEFINE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
+DEFINE_PER_CPU(struct sched_domain *, sd_numa);                /* Lowest domain with SD_NUMA set */
+DEFINE_PER_CPU(struct sched_domain *, sd_asym);                /* Highest SD_ASYM_PACKING domain */
+
+/*
+ * Refresh the per-CPU shortcut pointers (sd_llc and friends) for @cpu
+ * after its domain hierarchy has been (re)built.
+ */
+static void update_top_cache_domain(int cpu)
+{
+       struct sched_domain_shared *sds = NULL;
+       struct sched_domain *sd;
+       /* Defaults when no cache-sharing domain exists: CPU is its own LLC. */
+       int id = cpu;
+       int size = 1;
+
+       sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
+       if (sd) {
+               id = cpumask_first(sched_domain_span(sd));
+               size = cpumask_weight(sched_domain_span(sd));
+               sds = sd->shared;
+       }
+
+       rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
+       per_cpu(sd_llc_size, cpu) = size;
+       per_cpu(sd_llc_id, cpu) = id;
+       rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);
+
+       sd = lowest_flag_domain(cpu, SD_NUMA);
+       rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
+
+       sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
+       rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
+}
+
+/*
+ * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
+ * hold the hotplug lock.
+ */
+static void
+cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
+{
+       struct rq *rq = cpu_rq(cpu);
+       struct sched_domain *tmp;
+
+       /* Remove the sched domains which do not contribute to scheduling. */
+       for (tmp = sd; tmp; ) {
+               struct sched_domain *parent = tmp->parent;
+               if (!parent)
+                       break;
+
+               /* Splice out a parent that adds nothing over its child. */
+               if (sd_parent_degenerate(tmp, parent)) {
+                       tmp->parent = parent->parent;
+                       if (parent->parent)
+                               parent->parent->child = tmp;
+                       /*
+                        * Transfer SD_PREFER_SIBLING down in case of a
+                        * degenerate parent; the spans match for this
+                        * so the property transfers.
+                        */
+                       if (parent->flags & SD_PREFER_SIBLING)
+                               tmp->flags |= SD_PREFER_SIBLING;
+                       destroy_sched_domain(parent);
+               } else
+                       tmp = tmp->parent;
+       }
+
+       /* The base level itself may also be degenerate; drop it too. */
+       if (sd && sd_degenerate(sd)) {
+               tmp = sd;
+               sd = sd->parent;
+               destroy_sched_domain(tmp);
+               if (sd)
+                       sd->child = NULL;
+       }
+
+       sched_domain_debug(sd, cpu);
+
+       /* Switch rq->sd over and free the old hierarchy after a grace period. */
+       rq_attach_root(rq, rd);
+       tmp = rq->sd;
+       rcu_assign_pointer(rq->sd, sd);
+       destroy_sched_domains(tmp);
+
+       update_top_cache_domain(cpu);
+}
+
+/*
+ * Setup the mask of CPUs configured for isolated domains.
+ * Parses the "isolcpus=" boot parameter; on a parse failure the mask is
+ * left as allocated (zeroed) and an error is logged.
+ */
+static int __init isolated_cpu_setup(char *str)
+{
+       int ret;
+
+       alloc_bootmem_cpumask_var(&cpu_isolated_map);
+       ret = cpulist_parse(str, cpu_isolated_map);
+       if (ret) {
+               /* Valid CPU ids run from 0 to nr_cpu_ids - 1, inclusive. */
+               pr_err("sched: Error, all isolcpus= values must be between 0 and %u\n",
+                      nr_cpu_ids - 1);
+               return 0;
+       }
+       return 1;
+}
+__setup("isolcpus=", isolated_cpu_setup);
+
+/* Scratch state carried through a build_sched_domains() run. */
+struct s_data {
+       struct sched_domain ** __percpu sd;     /* Per-CPU base-domain pointers */
+       struct root_domain      *rd;            /* Root domain to attach CPUs to */
+};
+
+/*
+ * Allocation progress markers, ordered from most complete to least so
+ * that __free_domain_allocs() can fall through from the reached stage
+ * down to sa_none.
+ */
+enum s_alloc {
+       sa_rootdomain,
+       sa_sd,
+       sa_sd_storage,
+       sa_none,
+};
+
+/*
+ * Build an iteration mask that can exclude certain CPUs from the upwards
+ * domain traversal.
+ *
+ * Asymmetric node setups can result in situations where the domain tree is of
+ * unequal depth, make sure to skip domains that already cover the entire
+ * range.
+ *
+ * In that case build_sched_domains() will have terminated the iteration early
+ * and our sibling sd spans will be empty. Domains should always include the
+ * CPU they're built on, so check that.
+ */
+static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
+{
+       const struct cpumask *span = sched_domain_span(sd);
+       struct sd_data *sdd = sd->private;
+       struct sched_domain *sibling;
+       int i;
+
+       for (i, span) {
+               sibling = *per_cpu_ptr(sdd->sd, i);
+               /* Skip siblings that don't cover their own CPU (see above). */
+               if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
+                       continue;
+
+               cpumask_set_cpu(i, sched_group_mask(sg));
+       }
+}
+
+/*
+ * Return the canonical balance CPU for this group, this is the first CPU
+ * of this group that's also in the iteration mask.
+ */
+/*
+ * NOTE(review): presumably the intersection is never empty here, since
+ * build_group_mask() always sets the CPU a group was built on -- verify
+ * before relying on the return value being a valid CPU.
+ */
+int group_balance_cpu(struct sched_group *sg)
+{
+       return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
+}
+
+/*
+ * Build the (possibly overlapping) groups for an SD_OVERLAP domain: one
+ * group per sibling domain span covering @sd's span.  Returns 0, or
+ * -ENOMEM after freeing every group built so far.
+ */
+static int
+build_overlap_sched_groups(struct sched_domain *sd, int cpu)
+{
+       struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
+       const struct cpumask *span = sched_domain_span(sd);
+       struct cpumask *covered = sched_domains_tmpmask;
+       struct sd_data *sdd = sd->private;
+       struct sched_domain *sibling;
+       int i;
+
+       cpumask_clear(covered);
+
+       for (i, span) {
+               struct cpumask *sg_span;
+
+               /* Already part of an earlier group's span? */
+               if (cpumask_test_cpu(i, covered))
+                       continue;
+
+               sibling = *per_cpu_ptr(sdd->sd, i);
+
+               /* See the comment near build_group_mask(). */
+               if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
+                       continue;
+
+               sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
+                               GFP_KERNEL, cpu_to_node(cpu));
+
+               if (!sg)
+                       goto fail;
+
+               sg_span = sched_group_cpus(sg);
+               if (sibling->child)
+                       cpumask_copy(sg_span, sched_domain_span(sibling->child));
+               else
+                       cpumask_set_cpu(i, sg_span);
+
+               cpumask_or(covered, covered, sg_span);
+
+               sg->sgc = *per_cpu_ptr(sdd->sgc, i);
+               /* First reference: initialize the group's balance mask. */
+               if (atomic_inc_return(&sg->sgc->ref) == 1)
+                       build_group_mask(sd, sg);
+
+               /*
+                * Initialize sgc->capacity such that even if we mess up the
+                * domains and no possible iteration will get us here, we won't
+                * die on a /0 trap.
+                */
+               sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
+               sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
+
+               /*
+                * Make sure the first group of this domain contains the
+                * canonical balance CPU. Otherwise the sched_domain iteration
+                * breaks. See update_sg_lb_stats().
+                */
+               if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
+                   group_balance_cpu(sg) == cpu)
+                       groups = sg;
+
+               if (!first)
+                       first = sg;
+               if (last)
+                       last->next = sg;
+               last = sg;
+               /* Keep the list circular at every step. */
+               last->next = first;
+       }
+       sd->groups = groups;
+
+       return 0;
+
+fail:
+       free_sched_groups(first, 0);
+
+       return -ENOMEM;
+}
+
+/*
+ * Look up the sched_group for @cpu at @sdd's topology level; a group
+ * lives on the first CPU of the child domain's span.  When @sg is
+ * non-NULL, also hand back the group with its sgc reference primed.
+ * Returns the CPU the group lives on (the group's identity).
+ */
+static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
+{
+       struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
+       struct sched_domain *child = sd->child;
+
+       if (child)
+               cpu = cpumask_first(sched_domain_span(child));
+
+       if (sg) {
+               *sg = *per_cpu_ptr(sdd->sg, cpu);
+               (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu);
+
+               /* For claim_allocations: */
+               atomic_set(&(*sg)->sgc->ref, 1);
+       }
+
+       return cpu;
+}
+
+/*
+ * build_sched_groups will build a circular linked list of the groups
+ * covered by the given span, and will set each group's ->cpumask correctly,
+ * and ->cpu_capacity to 0.
+ *
+ * Assumes the sched_domain tree is fully constructed
+ */
+static int
+build_sched_groups(struct sched_domain *sd, int cpu)
+{
+       struct sched_group *first = NULL, *last = NULL;
+       struct sd_data *sdd = sd->private;
+       const struct cpumask *span = sched_domain_span(sd);
+       struct cpumask *covered;
+       int i;
+
+       /* Wire up this domain's own group and take a reference on it. */
+       get_group(cpu, sdd, &sd->groups);
+       atomic_inc(&sd->groups->ref);
+
+       /* Only the first CPU of the span links the full group list. */
+       if (cpu != cpumask_first(span))
+               return 0;
+
+       lockdep_assert_held(&sched_domains_mutex);
+       covered = sched_domains_tmpmask;
+
+       cpumask_clear(covered);
+
+       for (i, span) {
+               struct sched_group *sg;
+               int group, j;
+
+               if (cpumask_test_cpu(i, covered))
+                       continue;
+
+               group = get_group(i, sdd, &sg);
+               cpumask_setall(sched_group_mask(sg));
+
+               /* Collect every CPU whose group identity matches. */
+               for (j, span) {
+                       if (get_group(j, sdd, NULL) != group)
+                               continue;
+
+                       cpumask_set_cpu(j, covered);
+                       cpumask_set_cpu(j, sched_group_cpus(sg));
+               }
+
+               if (!first)
+                       first = sg;
+               if (last)
+                       last->next = sg;
+               last = sg;
+       }
+       /* Close the circular list. */
+       last->next = first;
+
+       return 0;
+}
+
+/*
+ * Initialize sched groups cpu_capacity.
+ *
+ * cpu_capacity indicates the capacity of sched group, which is used while
+ * distributing the load between different sched groups in a sched domain.
+ * Typically cpu_capacity for all the groups in a sched domain will be same
+ * unless there are asymmetries in the topology. If there are asymmetries,
+ * group having more cpu_capacity will pickup more load compared to the
+ * group having less cpu_capacity.
+ */
+static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
+{
+       struct sched_group *sg = sd->groups;
+
+       WARN_ON(!sg);
+
+       do {
+               /* NOTE: this inner 'cpu' shadows the function parameter. */
+               int cpu, max_cpu = -1;
+
+               sg->group_weight = cpumask_weight(sched_group_cpus(sg));
+
+               if (!(sd->flags & SD_ASYM_PACKING))
+                       goto next;
+
+               /* Pick the most preferred CPU (see sched_asym_prefer()). */
+               for (cpu, sched_group_cpus(sg)) {
+                       if (max_cpu < 0)
+                               max_cpu = cpu;
+                       else if (sched_asym_prefer(cpu, max_cpu))
+                               max_cpu = cpu;
+               }
+               sg->asym_prefer_cpu = max_cpu;
+
+next:
+               sg = sg->next;
+       } while (sg != sd->groups);
+
+       /* Only the group's balance CPU computes the capacity. */
+       if (cpu != group_balance_cpu(sg))
+               return;
+
+       update_group_capacity(sd, cpu);
+}
+
+/*
+ * Initializers for schedule domains
+ * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
+ */
+
+/* -1 means "not set"; consumed by set_domain_attribute() below. */
+static int default_relax_domain_level = -1;
+int sched_domain_level_max;
+
+/* Parse the "relax_domain_level=" boot parameter. */
+static int __init setup_relax_domain_level(char *str)
+{
+       if (kstrtoint(str, 0, &default_relax_domain_level))
+               pr_warn("Unable to set relax_domain_level\n");
+
+       return 1;
+}
+__setup("relax_domain_level=", setup_relax_domain_level);
+
+/*
+ * Apply the relax_domain_level request (from @attr, or the boot-time
+ * default when @attr doesn't specify one) to @sd: domains whose level
+ * is above the requested level get wake/newidle balancing turned off,
+ * the rest get it turned on.  No request at all leaves @sd untouched.
+ */
+static void set_domain_attribute(struct sched_domain *sd,
+                                struct sched_domain_attr *attr)
+{
+       int request;
+
+       if (!attr || attr->relax_domain_level < 0) {
+               if (default_relax_domain_level < 0)
+                       return;
+               else
+                       request = default_relax_domain_level;
+       } else
+               request = attr->relax_domain_level;
+       if (request < sd->level) {
+               /* Turn off idle balance on this domain: */
+               sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
+       } else {
+               /* Turn on idle balance on this domain: */
+               sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
+       }
+}
+
+static void __sdt_free(const struct cpumask *cpu_map);
+static int __sdt_alloc(const struct cpumask *cpu_map);
+
+/*
+ * Unwind the allocations made by __visit_domain_allocation_hell();
+ * @what names the deepest stage that succeeded and each case falls
+ * through to undo the stages below it.
+ */
+static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
+                                const struct cpumask *cpu_map)
+{
+       switch (what) {
+       case sa_rootdomain:
+               /* Only free the root domain if nobody attached to it. */
+               if (!atomic_read(&d->rd->refcount))
+                       free_rootdomain(&d->rd->rcu);
+               /* Fall through */
+       case sa_sd:
+               free_percpu(d->sd);
+               /* Fall through */
+       case sa_sd_storage:
+               __sdt_free(cpu_map);
+               /* Fall through */
+       case sa_none:
+               break;
+       }
+}
+
+/*
+ * Allocate everything build_sched_domains() needs for @cpu_map and
+ * report how far we got; the caller passes the result to
+ * __free_domain_allocs() for cleanup on both error and success paths.
+ */
+static enum s_alloc
+__visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map)
+{
+       memset(d, 0, sizeof(*d));
+
+       if (__sdt_alloc(cpu_map))
+               return sa_sd_storage;
+       d->sd = alloc_percpu(struct sched_domain *);
+       if (!d->sd)
+               return sa_sd_storage;
+       d->rd = alloc_rootdomain();
+       if (!d->rd)
+               return sa_sd;
+       return sa_rootdomain;
+}
+
+/*
+ * NULL the sd_data elements we've used to build the sched_domain and
+ * sched_group structure so that the subsequent __free_domain_allocs()
+ * will not free the data we're using.
+ */
+static void claim_allocations(int cpu, struct sched_domain *sd)
+{
+       struct sd_data *sdd = sd->private;
+
+       WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
+       *per_cpu_ptr(sdd->sd, cpu) = NULL;
+
+       /* A non-zero ref (taken in sd_init()/get_group()) means "in use". */
+       if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))
+               *per_cpu_ptr(sdd->sds, cpu) = NULL;
+
+       if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
+               *per_cpu_ptr(sdd->sg, cpu) = NULL;
+
+       if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
+               *per_cpu_ptr(sdd->sgc, cpu) = NULL;
+}
+
+#ifdef CONFIG_NUMA
+static int sched_domains_numa_levels;          /* Number of unique NUMA distances */
+enum numa_topology_type sched_numa_topology_type;
+static int *sched_domains_numa_distance;       /* Sorted unique distance values */
+int sched_max_numa_distance;
+static struct cpumask ***sched_domains_numa_masks; /* [level][node] -> cpumask */
+static int sched_domains_curr_level;           /* Level being built; see sd_numa_mask() */
+#endif
+
+/*
+ * SD_flags allowed in topology descriptions.
+ *
+ * These flags are purely descriptive of the topology and do not prescribe
+ * behaviour. Behaviour is artificial and mapped in the below sd_init()
+ * function:
+ *
+ *   SD_SHARE_CPUCAPACITY   - describes SMT topologies
+ *   SD_SHARE_PKG_RESOURCES - describes shared caches
+ *   SD_NUMA                - describes NUMA topologies
+ *   SD_SHARE_POWERDOMAIN   - describes shared power domain
+ *   SD_ASYM_CPUCAPACITY    - describes mixed capacity topologies
+ *
+ * Odd one out, which beside describing the topology has a quirk also
+ * prescribes the desired behaviour that goes along with it:
+ *
+ *   SD_ASYM_PACKING        - describes SMT quirks
+ */
+/* Mask of all flags a topology level's ->sd_flags() may legally return. */
+#define TOPOLOGY_SD_FLAGS              \
+       (SD_SHARE_CPUCAPACITY |         \
+        SD_SHARE_PKG_RESOURCES |       \
+        SD_NUMA |                      \
+        SD_ASYM_PACKING |              \
+        SD_ASYM_CPUCAPACITY |          \
+        SD_SHARE_POWERDOMAIN)
+
+/*
+ * Build the sched_domain for @cpu at topology level @tl, spanning the
+ * intersection of @cpu_map and the level's mask, and wire in @child.
+ * The level's purely descriptive topology flags are converted into
+ * balancing behaviour below.
+ */
+static struct sched_domain *
+sd_init(struct sched_domain_topology_level *tl,
+       const struct cpumask *cpu_map,
+       struct sched_domain *child, int cpu)
+{
+       struct sd_data *sdd = &tl->data;
+       struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
+       int sd_id, sd_weight, sd_flags = 0;
+
+#ifdef CONFIG_NUMA
+       /*
+        * Ugly hack to pass state to sd_numa_mask()...
+        */
+       sched_domains_curr_level = tl->numa_level;
+#endif
+
+       sd_weight = cpumask_weight(tl->mask(cpu));
+
+       if (tl->sd_flags)
+               sd_flags = (*tl->sd_flags)();
+       /* Strip (and warn about) any flags a topology level may not set. */
+       if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
+                       "wrong sd_flags in topology description\n"))
+               sd_flags &= ~TOPOLOGY_SD_FLAGS;
+
+       *sd = (struct sched_domain){
+               .min_interval           = sd_weight,
+               .max_interval           = 2*sd_weight,
+               .busy_factor            = 32,
+               .imbalance_pct          = 125,
+
+               .cache_nice_tries       = 0,
+               .busy_idx               = 0,
+               .idle_idx               = 0,
+               .newidle_idx            = 0,
+               .wake_idx               = 0,
+               .forkexec_idx           = 0,
+
+               .flags                  = 1*SD_LOAD_BALANCE
+                                       | 1*SD_BALANCE_NEWIDLE
+                                       | 1*SD_BALANCE_EXEC
+                                       | 1*SD_BALANCE_FORK
+                                       | 0*SD_BALANCE_WAKE
+                                       | 1*SD_WAKE_AFFINE
+                                       | 0*SD_SHARE_CPUCAPACITY
+                                       | 0*SD_SHARE_PKG_RESOURCES
+                                       | 0*SD_SERIALIZE
+                                       | 0*SD_PREFER_SIBLING
+                                       | 0*SD_NUMA
+                                       | sd_flags
+                                       ,
+
+               .last_balance           = jiffies,
+               .balance_interval       = sd_weight,
+               .smt_gain               = 0,
+               .max_newidle_lb_cost    = 0,
+               .next_decay_max_lb_cost = jiffies,
+               .child                  = child,
+#ifdef CONFIG_SCHED_DEBUG
+               .name                   = tl->name,
+#endif
+       };
+
+       cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
+       sd_id = cpumask_first(sched_domain_span(sd));
+
+       /*
+        * Convert topological properties into behaviour.
+        */
+
+       if (sd->flags & SD_ASYM_CPUCAPACITY) {
+               struct sched_domain *t = sd;
+
+               for_each_lower_domain(t)
+                       t->flags |= SD_BALANCE_WAKE;
+       }
+
+       if (sd->flags & SD_SHARE_CPUCAPACITY) {
+               sd->flags |= SD_PREFER_SIBLING;
+               sd->imbalance_pct = 110;
+               sd->smt_gain = 1178; /* ~15% */
+
+       } else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
+               sd->imbalance_pct = 117;
+               sd->cache_nice_tries = 1;
+               sd->busy_idx = 2;
+
+#ifdef CONFIG_NUMA
+       } else if (sd->flags & SD_NUMA) {
+               sd->cache_nice_tries = 2;
+               sd->busy_idx = 3;
+               sd->idle_idx = 2;
+
+               sd->flags |= SD_SERIALIZE;
+               if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) {
+                       sd->flags &= ~(SD_BALANCE_EXEC |
+                                      SD_BALANCE_FORK |
+                                      SD_WAKE_AFFINE);
+               }
+
+#endif
+       } else {
+               /* Default: a package/DIE-like level with no shared resources. */
+               sd->flags |= SD_PREFER_SIBLING;
+               sd->cache_nice_tries = 1;
+               sd->busy_idx = 2;
+               sd->idle_idx = 1;
+       }
+
+       /*
+        * For all levels sharing cache; connect a sched_domain_shared
+        * instance.
+        */
+       if (sd->flags & SD_SHARE_PKG_RESOURCES) {
+               sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
+               atomic_inc(&sd->shared->ref);
+               atomic_set(&sd->shared->nr_busy_cpus, sd_weight);
+       }
+
+       sd->private = sdd;
+
+       return sd;
+}
+
+/*
+ * Topology list, bottom-up.
+ */
+static struct sched_domain_topology_level default_topology[] = {
+#ifdef CONFIG_SCHED_SMT
+       { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
+#endif
+#ifdef CONFIG_SCHED_MC
+       { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
+#endif
+       { cpu_cpu_mask, SD_INIT_NAME(DIE) },
+       { NULL, },      /* Terminator; see for_each_sd_topology(). */
+};
+
+static struct sched_domain_topology_level *sched_domain_topology =
+       default_topology;
+
+/* Iterate levels until the NULL-mask terminator entry. */
+#define for_each_sd_topology(tl)                       \
+       for (tl = sched_domain_topology; tl->mask; tl++)
+
+/*
+ * Replace the topology list; only allowed before sched_smp_initialized
+ * is set (i.e. before the scheduler has built domains from it).
+ */
+void set_sched_topology(struct sched_domain_topology_level *tl)
+{
+       if (WARN_ON_ONCE(sched_smp_initialized))
+               return;
+
+       sched_domain_topology = tl;
+}
+
+#ifdef CONFIG_NUMA
+
+/*
+ * Mask of CPUs within the currently-built NUMA distance level of @cpu's
+ * node; the level is smuggled in via sched_domains_curr_level (see the
+ * "ugly hack" note in sd_init()).
+ */
+static const struct cpumask *sd_numa_mask(int cpu)
+{
+       return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
+}
+
+/*
+ * Report a NUMA distance-table inconsistency and dump the full
+ * node_distance() matrix once; subsequent calls are no-ops.
+ */
+static void sched_numa_warn(const char *str)
+{
+       static bool done;       /* was: int initialized with a bool literal */
+       int i, j;
+
+       if (done)
+               return;
+
+       done = true;
+
+       printk(KERN_WARNING "ERROR: %s\n\n", str);
+
+       for (i = 0; i < nr_node_ids; i++) {
+               printk(KERN_WARNING "  ");
+               for (j = 0; j < nr_node_ids; j++)
+                       printk(KERN_CONT "%02d ", node_distance(i,j));
+               printk(KERN_CONT "\n");
+       }
+       printk(KERN_WARNING "\n");
+}
+
+/*
+ * Check whether @distance is one of the NUMA distances known to the
+ * scheduler: either the identity (local) distance or an entry in the
+ * deduplicated table built by sched_init_numa().
+ */
+bool find_numa_distance(int distance)
+{
+       int level;
+
+       if (distance == node_distance(0, 0))
+               return true;
+
+       for (level = 0; level < sched_domains_numa_levels; level++) {
+               if (sched_domains_numa_distance[level] == distance)
+                       return true;
+       }
+
+       return false;
+}
+
+/*
+ * A system can have three types of NUMA topology:
+ * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
+ * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
+ * NUMA_BACKPLANE: nodes can reach other nodes through a backplane
+ *
+ * The difference between a glueless mesh topology and a backplane
+ * topology lies in whether communication between not directly
+ * connected nodes goes through intermediary nodes (where programs
+ * could run), or through backplane controllers. This affects
+ * placement of programs.
+ *
+ * The type of topology can be discerned with the following tests:
+ * - If the maximum distance between any nodes is 1 hop, the system
+ *   is directly connected.
+ * - If for two nodes A and B, located N > 1 hops away from each other,
+ *   there is an intermediary node C, which is < N hops away from both
+ *   nodes A and B, the system is a glueless mesh.
+ */
+static void init_numa_topology_type(void)
+{
+       int a, b, c, n;
+
+       /* n is the largest inter-node distance seen by sched_init_numa(). */
+       n = sched_max_numa_distance;
+
+       /* A single distance level means everything is one hop away. */
+       if (sched_domains_numa_levels <= 1) {
+               sched_numa_topology_type = NUMA_DIRECT;
+               return;
+       }
+
+       for (a) {
+               for (b) {
+                       /* Find two nodes furthest removed from each other. */
+                       if (node_distance(a, b) < n)
+                               continue;
+
+                       /* Is there an intermediary node between a and b? */
+                       for (c) {
+                               if (node_distance(a, c) < n &&
+                                   node_distance(b, c) < n) {
+                                       sched_numa_topology_type =
+                                                       NUMA_GLUELESS_MESH;
+                                       return;
+                               }
+                       }
+
+                       /* No intermediary for the furthest pair: backplane. */
+                       sched_numa_topology_type = NUMA_BACKPLANE;
+                       return;
+               }
+       }
+}
+
+/*
+ * Discover the distinct NUMA distance levels of the machine and extend
+ * the topology list with one overlapping level per non-local distance.
+ *
+ * NOTE(review): the early-return error paths below leave partially
+ * allocated tables/masks in place; presumably acceptable for a
+ * once-at-boot function -- verify before reusing elsewhere.
+ */
+void sched_init_numa(void)
+{
+       int next_distance, curr_distance = node_distance(0, 0);
+       struct sched_domain_topology_level *tl;
+       int level = 0;
+       int i, j, k;
+
+       sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
+       if (!sched_domains_numa_distance)
+               return;
+
+       /*
+        * O(nr_nodes^2) deduplicating selection sort -- in order to find the
+        * unique distances in the node_distance() table.
+        *
+        * Assumes node_distance(0,j) includes all distances in
+        * node_distance(i,j) in order to avoid cubic time.
+        */
+       next_distance = curr_distance;
+       for (i = 0; i < nr_node_ids; i++) {
+               for (j = 0; j < nr_node_ids; j++) {
+                       for (k = 0; k < nr_node_ids; k++) {
+                               int distance = node_distance(i, k);
+
+                               /* Track the smallest distance above curr_distance. */
+                               if (distance > curr_distance &&
+                                   (distance < next_distance ||
+                                    next_distance == curr_distance))
+                                       next_distance = distance;
+
+                               /*
+                                * While not a strong assumption it would be nice to know
+                                * about cases where if node A is connected to B, B is not
+                                * equally connected to A.
+                                */
+                               if (sched_debug() && node_distance(k, i) != distance)
+                                       sched_numa_warn("Node-distance not symmetric");
+
+                               if (sched_debug() && i && !find_numa_distance(distance))
+                                       sched_numa_warn("Node-0 not representative");
+                       }
+                       if (next_distance != curr_distance) {
+                               sched_domains_numa_distance[level++] = next_distance;
+                               sched_domains_numa_levels = level;
+                               curr_distance = next_distance;
+                       } else break;
+               }
+
+               /*
+                * In case of sched_debug() we verify the above assumption.
+                */
+               if (!sched_debug())
+                       break;
+       }
+
+       if (!level)
+               return;
+
+       /*
+        * 'level' contains the number of unique distances, excluding the
+        * identity distance node_distance(i,i).
+        *
+        * The sched_domains_numa_distance[] array includes the actual distance
+        * numbers.
+        */
+
+       /*
+        * Here, we should temporarily reset sched_domains_numa_levels to 0.
+        * If it fails to allocate memory for array sched_domains_numa_masks[][],
+        * the array will contain less then 'level' members. This could be
+        * dangerous when we use it to iterate array sched_domains_numa_masks[][]
+        * in other functions.
+        *
+        * We reset it to 'level' at the end of this function.
+        */
+       sched_domains_numa_levels = 0;
+
+       sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
+       if (!sched_domains_numa_masks)
+               return;
+
+       /*
+        * Now for each level, construct a mask per node which contains all
+        * CPUs of nodes that are that many hops away from us.
+        */
+       for (i = 0; i < level; i++) {
+               sched_domains_numa_masks[i] =
+                       kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
+               if (!sched_domains_numa_masks[i])
+                       return;
+
+               for (j = 0; j < nr_node_ids; j++) {
+                       struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
+                       if (!mask)
+                               return;
+
+                       sched_domains_numa_masks[i][j] = mask;
+
+                       for (k) {
+                               if (node_distance(j, k) > sched_domains_numa_distance[i])
+                                       continue;
+
+                               cpumask_or(mask, mask, cpumask_of_node(k));
+                       }
+               }
+       }
+
+       /* Compute default topology size */
+       for (i = 0; sched_domain_topology[i].mask; i++);
+
+       tl = kzalloc((i + level + 1) *
+                       sizeof(struct sched_domain_topology_level), GFP_KERNEL);
+       if (!tl)
+               return;
+
+       /*
+        * Copy the default topology bits..
+        */
+       for (i = 0; sched_domain_topology[i].mask; i++)
+               tl[i] = sched_domain_topology[i];
+
+       /*
+        * .. and append 'j' levels of NUMA goodness.
+        */
+       for (j = 0; j < level; i++, j++) {
+               tl[i] = (struct sched_domain_topology_level){
+                       .mask = sd_numa_mask,
+                       .sd_flags = cpu_numa_flags,
+                       .flags = SDTL_OVERLAP,
+                       .numa_level = j,
+                       SD_INIT_NAME(NUMA)
+               };
+       }
+
+       sched_domain_topology = tl;
+
+       sched_domains_numa_levels = level;
+       sched_max_numa_distance = sched_domains_numa_distance[level - 1];
+
+       init_numa_topology_type();
+}
+
+/*
+ * Hotplug: add @cpu to every per-level, per-node NUMA mask whose node
+ * lies within that level's distance of @cpu's own node.
+ */
+void sched_domains_numa_masks_set(unsigned int cpu)
+{
+       int node = cpu_to_node(cpu);
+       int level, j;
+
+       for (level = 0; level < sched_domains_numa_levels; level++) {
+               for (j = 0; j < nr_node_ids; j++) {
+                       if (node_distance(j, node) <= sched_domains_numa_distance[level])
+                               cpumask_set_cpu(cpu, sched_domains_numa_masks[level][j]);
+               }
+       }
+}
+
+/* Hotplug: drop @cpu from every per-level, per-node NUMA mask. */
+void sched_domains_numa_masks_clear(unsigned int cpu)
+{
+       int level, j;
+
+       for (level = 0; level < sched_domains_numa_levels; level++) {
+               for (j = 0; j < nr_node_ids; j++)
+                       cpumask_clear_cpu(cpu, sched_domains_numa_masks[level][j]);
+       }
+}
+
+#endif /* CONFIG_NUMA */
+
+/*
+ * Allocate the per-level, per-CPU sd/sds/sg/sgc storage for every
+ * topology level.  Returns 0 or -ENOMEM; on failure whatever was
+ * allocated so far is left in place for __sdt_free() to reclaim.
+ */
+static int __sdt_alloc(const struct cpumask *cpu_map)
+{
+       struct sched_domain_topology_level *tl;
+       int j;
+
+       for (tl) {
+               struct sd_data *sdd = &tl->data;
+
+               sdd->sd = alloc_percpu(struct sched_domain *);
+               if (!sdd->sd)
+                       return -ENOMEM;
+
+               sdd->sds = alloc_percpu(struct sched_domain_shared *);
+               if (!sdd->sds)
+                       return -ENOMEM;
+
+               sdd->sg = alloc_percpu(struct sched_group *);
+               if (!sdd->sg)
+                       return -ENOMEM;
+
+               sdd->sgc = alloc_percpu(struct sched_group_capacity *);
+               if (!sdd->sgc)
+                       return -ENOMEM;
+
+               for (j, cpu_map) {
+                       struct sched_domain *sd;
+                       struct sched_domain_shared *sds;
+                       struct sched_group *sg;
+                       struct sched_group_capacity *sgc;
+
+                       /* Trailing cpumask_size() holds the domain's span. */
+                       sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
+                                       GFP_KERNEL, cpu_to_node(j));
+                       if (!sd)
+                               return -ENOMEM;
+
+                       *per_cpu_ptr(sdd->sd, j) = sd;
+
+                       sds = kzalloc_node(sizeof(struct sched_domain_shared),
+                                       GFP_KERNEL, cpu_to_node(j));
+                       if (!sds)
+                               return -ENOMEM;
+
+                       *per_cpu_ptr(sdd->sds, j) = sds;
+
+                       sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
+                                       GFP_KERNEL, cpu_to_node(j));
+                       if (!sg)
+                               return -ENOMEM;
+
+                       /* Self-linked until build_sched_groups() links the list. */
+                       sg->next = sg;
+
+                       *per_cpu_ptr(sdd->sg, j) = sg;
+
+                       sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
+                                       GFP_KERNEL, cpu_to_node(j));
+                       if (!sgc)
+                               return -ENOMEM;
+
+                       *per_cpu_ptr(sdd->sgc, j) = sgc;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Free everything __sdt_alloc() set up for the CPUs in @cpu_map.  Safe
+ * to call on a partially completed allocation: each percpu array is
+ * checked before being dereferenced, and kfree()/free_percpu() accept
+ * NULL.
+ */
+static void __sdt_free(const struct cpumask *cpu_map)
+{
+       struct sched_domain_topology_level *tl;
+       int j;
+
+       for_each_sd_topology(tl) {
+               struct sd_data *sdd = &tl->data;
+
+               for_each_cpu(j, cpu_map) {
+                       struct sched_domain *sd;
+
+                       if (sdd->sd) {
+                               sd = *per_cpu_ptr(sdd->sd, j);
+                               /* Only SD_OVERLAP domains free their group list here */
+                               if (sd && (sd->flags & SD_OVERLAP))
+                                       free_sched_groups(sd->groups, 0);
+                               kfree(*per_cpu_ptr(sdd->sd, j));
+                       }
+
+                       if (sdd->sds)
+                               kfree(*per_cpu_ptr(sdd->sds, j));
+                       if (sdd->sg)
+                               kfree(*per_cpu_ptr(sdd->sg, j));
+                       if (sdd->sgc)
+                               kfree(*per_cpu_ptr(sdd->sgc, j));
+               }
+               /* NULL the arrays so a repeated free is harmless */
+               free_percpu(sdd->sd);
+               sdd->sd = NULL;
+               free_percpu(sdd->sds);
+               sdd->sds = NULL;
+               free_percpu(sdd->sg);
+               sdd->sg = NULL;
+               free_percpu(sdd->sgc);
+               sdd->sgc = NULL;
+       }
+}
+
+/*
+ * Create the sched_domain for @cpu at topology level @tl spanning
+ * @cpu_map, hook it up as the parent of @child and apply @attr.
+ *
+ * If the child's span is not a subset of the new domain's span the
+ * arch-provided topology is inconsistent: warn, then widen the new
+ * domain's span so it at least covers the child's CPUs.
+ */
+struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
+               const struct cpumask *cpu_map, struct sched_domain_attr *attr,
+               struct sched_domain *child, int cpu)
+{
+       struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu);
+
+       if (child) {
+               sd->level = child->level + 1;
+               sched_domain_level_max = max(sched_domain_level_max, sd->level);
+               child->parent = sd;
+
+               if (!cpumask_subset(sched_domain_span(child),
+                                   sched_domain_span(sd))) {
+                       pr_err("BUG: arch topology borken\n");
+#ifdef CONFIG_SCHED_DEBUG
+                       pr_err("     the %s domain not a subset of the %s domain\n",
+                                       child->name, sd->name);
+#endif
+                       /* Fixup, ensure @sd has at least @child cpus. */
+                       cpumask_or(sched_domain_span(sd),
+                                  sched_domain_span(sd),
+                                  sched_domain_span(child));
+               }
+
+       }
+       set_domain_attribute(sd, attr);
+
+       return sd;
+}
+
+/*
+ * Build sched domains for a given set of CPUs and attach the sched domains
+ * to the individual CPUs
+ */
+static int
+build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
+{
+       enum s_alloc alloc_state;
+       struct sched_domain *sd;
+       struct s_data d;
+       struct rq *rq = NULL;
+       int i, ret = -ENOMEM;
+
+       alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
+       if (alloc_state != sa_rootdomain)
+               goto error;
+
+       /* Set up domains for CPUs specified by the cpu_map: */
+       for_each_cpu(i, cpu_map) {
+               struct sched_domain_topology_level *tl;
+
+               sd = NULL;
+               for_each_sd_topology(tl) {
+                       sd = build_sched_domain(tl, cpu_map, attr, sd, i);
+                       /* First topology level: remember as this CPU's base domain */
+                       if (tl == sched_domain_topology)
+                               *per_cpu_ptr(d.sd, i) = sd;
+                       if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
+                               sd->flags |= SD_OVERLAP;
+                       /* Stop once a level already spans all requested CPUs */
+                       if (cpumask_equal(cpu_map, sched_domain_span(sd)))
+                               break;
+               }
+       }
+
+       /* Build the groups for the domains */
+       for_each_cpu(i, cpu_map) {
+               for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
+                       sd->span_weight = cpumask_weight(sched_domain_span(sd));
+                       if (sd->flags & SD_OVERLAP) {
+                               if (build_overlap_sched_groups(sd, i))
+                                       goto error;
+                       } else {
+                               if (build_sched_groups(sd, i))
+                                       goto error;
+                       }
+               }
+       }
+
+       /*
+        * Calculate CPU capacity for physical packages and nodes.
+        * NOTE(review): CPUs are deliberately walked in reverse order
+        * here -- the claim/capacity sequence appears to depend on it;
+        * confirm before changing.
+        */
+       for (i = nr_cpumask_bits-1; i >= 0; i--) {
+               if (!cpumask_test_cpu(i, cpu_map))
+                       continue;
+
+               for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
+                       claim_allocations(i, sd);
+                       init_sched_groups_capacity(i, sd);
+               }
+       }
+
+       /* Attach the domains */
+       rcu_read_lock();
+       for_each_cpu(i, cpu_map) {
+               rq = cpu_rq(i);
+               sd = *per_cpu_ptr(d.sd, i);
+
+               /* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */
+               if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity))
+                       WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig);
+
+               cpu_attach_domain(sd, d.rd, i);
+       }
+       rcu_read_unlock();
+
+       if (rq && sched_debug_enabled) {
+               pr_info("span: %*pbl (max cpu_capacity = %lu)\n",
+                       cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
+       }
+
+       ret = 0;
+error:
+       /*
+        * Reached on success as well as failure: frees whatever
+        * @alloc_state says is still owned here (on success,
+        * claim_allocations() above has presumably taken ownership of
+        * the parts now in use -- confirm).
+        */
+       __free_domain_allocs(&d, alloc_state, cpu_map);
+       return ret;
+}
+
+/* Current sched domains: */
+static cpumask_var_t                   *doms_cur;
+
+/* Number of sched domains in 'doms_cur': */
+static int                             ndoms_cur;
+
+/* Attributes of custom domains in 'doms_cur' */
+static struct sched_domain_attr                *dattr_cur;
+
+/*
+ * Special case: If a kmalloc() of a doms_cur partition (array of
+ * cpumask) fails, then fallback to a single sched domain,
+ * as determined by the single cpumask fallback_doms.
+ */
+cpumask_var_t                          fallback_doms;
+
+/*
+ * arch_update_cpu_topology lets virtualized architectures update the
+ * CPU core maps. It is supposed to return 1 if the topology changed
+ * or 0 if it stayed the same.
+ */
+int __weak arch_update_cpu_topology(void)
+{
+       /* Default stub: report "topology unchanged"; arches may override this weak symbol. */
+       return 0;
+}
+
+/*
+ * Allocate an array of @ndoms cpumasks for use as sched-domain
+ * partitions.  Returns NULL on failure, in which case any cpumasks
+ * already allocated are released via free_sched_domains().
+ */
+cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
+{
+       int i;
+       cpumask_var_t *doms;
+
+       doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
+       if (!doms)
+               return NULL;
+       for (i = 0; i < ndoms; i++) {
+               if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
+                       /* Undo the i masks allocated so far, then give up */
+                       free_sched_domains(doms, i);
+                       return NULL;
+               }
+       }
+       return doms;
+}
+
+/* Counterpart of alloc_sched_domains(): free @ndoms cpumasks and the array itself. */
+void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
+{
+       unsigned int i;
+       for (i = 0; i < ndoms; i++)
+               free_cpumask_var(doms[i]);
+       kfree(doms);
+}
+
+/*
+ * Set up scheduler domains and groups. Callers must hold the hotplug lock.
+ * For now this just excludes isolated CPUs, but could be used to
+ * exclude other special cases in the future.
+ */
+int init_sched_domains(const struct cpumask *cpu_map)
+{
+       int err;
+
+       arch_update_cpu_topology();
+       ndoms_cur = 1;
+       doms_cur = alloc_sched_domains(ndoms_cur);
+       if (!doms_cur)
+               /* Allocation failed: fall back to the static single-domain mask */
+               doms_cur = &fallback_doms;
+       /* Balance everything except the isolated CPUs */
+       cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
+       err = build_sched_domains(doms_cur[0], NULL);
+       register_sched_domain_sysctl();
+
+       return err;
+}
+
+/*
+ * Detach sched domains from a group of CPUs specified in cpu_map
+ * These CPUs will now be attached to the NULL domain
+ */
+static void detach_destroy_domains(const struct cpumask *cpu_map)
+{
+       int i;
+
+       rcu_read_lock();
+       for_each_cpu(i, cpu_map)
+               /* NULL domain + default root domain == detached */
+               cpu_attach_domain(NULL, &def_root_domain, i);
+       rcu_read_unlock();
+}
+
+/* handle null as "default" */
+/* handle null as "default" */
+static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
+                       struct sched_domain_attr *new, int idx_new)
+{
+       struct sched_domain_attr tmp;
+
+       /* Fast path: */
+       if (!new && !cur)
+               return 1;
+
+       /* A NULL side compares as the default attributes: */
+       tmp = SD_ATTR_INIT;
+       return !memcmp(cur ? (cur + idx_cur) : &tmp,
+                       new ? (new + idx_new) : &tmp,
+                       sizeof(struct sched_domain_attr));
+}
+
+/*
+ * Partition sched domains as specified by the 'ndoms_new'
+ * cpumasks in the array doms_new[] of cpumasks. This compares
+ * doms_new[] to the current sched domain partitioning, doms_cur[].
+ * It destroys each deleted domain and builds each new domain.
+ *
+ * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
+ * The masks don't intersect (don't overlap.) We should setup one
+ * sched domain for each mask. CPUs not in any of the cpumasks will
+ * not be load balanced. If the same cpumask appears both in the
+ * current 'doms_cur' domains and in the new 'doms_new', we can leave
+ * it as it is.
+ *
+ * The passed in 'doms_new' should be allocated using
+ * alloc_sched_domains.  This routine takes ownership of it and will
+ * free_sched_domains it when done with it. If the caller failed the
+ * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
+ * and partition_sched_domains() will fallback to the single partition
+ * 'fallback_doms', it also forces the domains to be rebuilt.
+ *
+ * If doms_new == NULL it will be replaced with cpu_online_mask.
+ * ndoms_new == 0 is a special case for destroying existing domains,
+ * and it will not create the default domain.
+ *
+ * Call with hotplug lock held
+ */
+void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
+                            struct sched_domain_attr *dattr_new)
+{
+       int i, j, n;
+       int new_topology;
+
+       mutex_lock(&sched_domains_mutex);
+
+       /* Always unregister in case we don't destroy any domains: */
+       unregister_sched_domain_sysctl();
+
+       /* Let the architecture update CPU core mappings: */
+       new_topology = arch_update_cpu_topology();
+
+       /* Number of new partitions to match against (0 if none were given): */
+       n = doms_new ? ndoms_new : 0;
+
+       /* Destroy deleted domains: */
+       for (i = 0; i < ndoms_cur; i++) {
+               /* A topology change invalidates every match: */
+               for (j = 0; j < n && !new_topology; j++) {
+                       if (cpumask_equal(doms_cur[i], doms_new[j])
+                           && dattrs_equal(dattr_cur, i, dattr_new, j))
+                               goto match1;
+               }
+               /* No match - a current sched domain not in new doms_new[] */
+               detach_destroy_domains(doms_cur[i]);
+match1:
+               ;
+       }
+
+       n = ndoms_cur;
+       if (doms_new == NULL) {
+               /*
+                * No partitions supplied: rebuild the single fallback
+                * domain.  n = 0 makes the match loop below a no-op so
+                * the fallback is always (re)built.
+                */
+               n = 0;
+               doms_new = &fallback_doms;
+               cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
+               WARN_ON_ONCE(dattr_new);
+       }
+
+       /* Build new domains: */
+       for (i = 0; i < ndoms_new; i++) {
+               /* Skip partitions that survive unchanged from doms_cur[]: */
+               for (j = 0; j < n && !new_topology; j++) {
+                       if (cpumask_equal(doms_new[i], doms_cur[j])
+                           && dattrs_equal(dattr_new, i, dattr_cur, j))
+                               goto match2;
+               }
+               /* No match - add a new doms_new */
+               build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
+match2:
+               ;
+       }
+
+       /* Remember the new sched domains (the static fallback is never freed): */
+       if (doms_cur != &fallback_doms)
+               free_sched_domains(doms_cur, ndoms_cur);
+
+       kfree(dattr_cur);
+       doms_cur = doms_new;
+       dattr_cur = dattr_new;
+       ndoms_cur = ndoms_new;
+
+       register_sched_domain_sysctl();
+
+       mutex_unlock(&sched_domains_mutex);
+}
+
index 3603d93a19689be7188a004f2b999b27e0ebdf2f..13f9def8b24aecf662dd12095e1d505e909785e4 100644 (file)
@@ -1581,7 +1581,7 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
        unsigned long flags;
        struct sighand_struct *psig;
        bool autoreap = false;
-       cputime_t utime, stime;
+       u64 utime, stime;
 
        BUG_ON(sig == -1);
 
@@ -1620,8 +1620,8 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
        rcu_read_unlock();
 
        task_cputime(tsk, &utime, &stime);
-       info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
-       info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);
+       info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
+       info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
 
        info.si_status = tsk->exit_code & 0x7f;
        if (tsk->exit_code & 0x80)
@@ -1685,7 +1685,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
        unsigned long flags;
        struct task_struct *parent;
        struct sighand_struct *sighand;
-       cputime_t utime, stime;
+       u64 utime, stime;
 
        if (for_ptracer) {
                parent = tsk->parent;
@@ -1705,8 +1705,8 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
        rcu_read_unlock();
 
        task_cputime(tsk, &utime, &stime);
-       info.si_utime = cputime_to_clock_t(utime);
-       info.si_stime = cputime_to_clock_t(stime);
+       info.si_utime = nsec_to_clock_t(utime);
+       info.si_stime = nsec_to_clock_t(stime);
 
        info.si_code = why;
        switch (why) {
index b6e4c16377c708027c1c8c52914106fb71e3ca93..9c15a9124e83b50661414fa425393cdb51866313 100644 (file)
@@ -18,10 +18,8 @@ void print_stack_trace(struct stack_trace *trace, int spaces)
        if (WARN_ON(!trace->entries))
                return;
 
-       for (i = 0; i < trace->nr_entries; i++) {
-               printk("%*c", 1 + spaces, ' ');
-               print_ip_sym(trace->entries[i]);
-       }
+       for (i = 0; i < trace->nr_entries; i++)
+               printk("%*c%pS\n", 1 + spaces, ' ', (void *)trace->entries[i]);
 }
 EXPORT_SYMBOL_GPL(print_stack_trace);
 
@@ -29,7 +27,6 @@ int snprint_stack_trace(char *buf, size_t size,
                        struct stack_trace *trace, int spaces)
 {
        int i;
-       unsigned long ip;
        int generated;
        int total = 0;
 
@@ -37,9 +34,8 @@ int snprint_stack_trace(char *buf, size_t size,
                return 0;
 
        for (i = 0; i < trace->nr_entries; i++) {
-               ip = trace->entries[i];
-               generated = snprintf(buf, size, "%*c[<%p>] %pS\n",
-                               1 + spaces, ' ', (void *) ip, (void *) ip);
+               generated = snprintf(buf, size, "%*c%pS\n", 1 + spaces, ' ',
+                                    (void *)trace->entries[i]);
 
                total += generated;
 
index 842914ef7de4b58a09c1b545d4cf804cb075983a..7d4a9a6df95688027da8236897225f95c7894889 100644 (file)
@@ -881,15 +881,15 @@ SYSCALL_DEFINE0(getegid)
 
 void do_sys_times(struct tms *tms)
 {
-       cputime_t tgutime, tgstime, cutime, cstime;
+       u64 tgutime, tgstime, cutime, cstime;
 
        thread_group_cputime_adjusted(current, &tgutime, &tgstime);
        cutime = current->signal->cutime;
        cstime = current->signal->cstime;
-       tms->tms_utime = cputime_to_clock_t(tgutime);
-       tms->tms_stime = cputime_to_clock_t(tgstime);
-       tms->tms_cutime = cputime_to_clock_t(cutime);
-       tms->tms_cstime = cputime_to_clock_t(cstime);
+       tms->tms_utime = nsec_to_clock_t(tgutime);
+       tms->tms_stime = nsec_to_clock_t(tgstime);
+       tms->tms_cutime = nsec_to_clock_t(cutime);
+       tms->tms_cstime = nsec_to_clock_t(cstime);
 }
 
 SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
@@ -1544,7 +1544,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
 {
        struct task_struct *t;
        unsigned long flags;
-       cputime_t tgutime, tgstime, utime, stime;
+       u64 tgutime, tgstime, utime, stime;
        unsigned long maxrss = 0;
 
        memset((char *)r, 0, sizeof (*r));
@@ -1600,8 +1600,8 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
        unlock_task_sighand(p, &flags);
 
 out:
-       cputime_to_timeval(utime, &r->ru_utime);
-       cputime_to_timeval(stime, &r->ru_stime);
+       r->ru_utime = ns_to_timeval(utime);
+       r->ru_stime = ns_to_timeval(stime);
 
        if (who != RUSAGE_CHILDREN) {
                struct mm_struct *mm = get_task_mm(p);
index 8dbaec0e4f7f079b87f50ea67c82341304387783..bb260ceb3718477fe1cce3c5690c42be4c30087e 100644 (file)
@@ -416,7 +416,7 @@ static struct ctl_table kern_table[] = {
        },
        {
                .procname       = "sched_rr_timeslice_ms",
-               .data           = &sched_rr_timeslice,
+               .data           = &sysctl_sched_rr_timeslice,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = sched_rr_handler,
@@ -2475,6 +2475,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
                                break;
                        if (neg)
                                continue;
+                       val = convmul * val / convdiv;
                        if ((min && val < *min) || (max && val > *max))
                                continue;
                        *i = val;
index 976840d29a714f82ff2960a76a9f046f203a0fff..938dbf33ef493cfac9f9b9a53b004bfe2420d281 100644 (file)
@@ -15,6 +15,5 @@ ifeq ($(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST),y)
 endif
 obj-$(CONFIG_GENERIC_SCHED_CLOCK)              += sched_clock.o
 obj-$(CONFIG_TICK_ONESHOT)                     += tick-oneshot.o tick-sched.o
-obj-$(CONFIG_TIMER_STATS)                      += timer_stats.o
 obj-$(CONFIG_DEBUG_FS)                         += timekeeping_debug.o
 obj-$(CONFIG_TEST_UDELAY)                      += test_udelay.o
index 665985b0a89afe88486fed0e2571771bdd4d94b5..93621ae718d391ac6a95cd561dfc22bd6dc15a18 100644 (file)
@@ -141,6 +141,10 @@ static void __clocksource_unstable(struct clocksource *cs)
 {
        cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
        cs->flags |= CLOCK_SOURCE_UNSTABLE;
+
+       if (cs->mark_unstable)
+               cs->mark_unstable(cs);
+
        if (finished_booting)
                schedule_work(&watchdog_work);
 }
index c6ecedd3b8393d4b3f4fc3b5cce41ebbffd375cd..8e11d8d9f419e2b72a20f5f7313f50fecc24e614 100644 (file)
@@ -94,17 +94,15 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
 };
 
 static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
+       /* Make sure we catch unsupported clockids */
+       [0 ... MAX_CLOCKS - 1]  = HRTIMER_MAX_CLOCK_BASES,
+
        [CLOCK_REALTIME]        = HRTIMER_BASE_REALTIME,
        [CLOCK_MONOTONIC]       = HRTIMER_BASE_MONOTONIC,
        [CLOCK_BOOTTIME]        = HRTIMER_BASE_BOOTTIME,
        [CLOCK_TAI]             = HRTIMER_BASE_TAI,
 };
 
-static inline int hrtimer_clockid_to_base(clockid_t clock_id)
-{
-       return hrtimer_clock_to_base_table[clock_id];
-}
-
 /*
  * Functions and macros which are different for UP/SMP systems are kept in a
  * single place
@@ -766,34 +764,6 @@ void hrtimers_resume(void)
        clock_was_set_delayed();
 }
 
-static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
-{
-#ifdef CONFIG_TIMER_STATS
-       if (timer->start_site)
-               return;
-       timer->start_site = __builtin_return_address(0);
-       memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
-       timer->start_pid = current->pid;
-#endif
-}
-
-static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
-{
-#ifdef CONFIG_TIMER_STATS
-       timer->start_site = NULL;
-#endif
-}
-
-static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
-{
-#ifdef CONFIG_TIMER_STATS
-       if (likely(!timer_stats_active))
-               return;
-       timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
-                                timer->function, timer->start_comm, 0);
-#endif
-}
-
 /*
  * Counterpart to lock_hrtimer_base above:
  */
@@ -932,7 +902,6 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool rest
                 * rare case and less expensive than a smp call.
                 */
                debug_deactivate(timer);
-               timer_stats_hrtimer_clear_start_info(timer);
                reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
 
                if (!restart)
@@ -990,8 +959,6 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
        /* Switch the timer base, if necessary: */
        new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
 
-       timer_stats_hrtimer_set_start_info(timer);
-
        leftmost = enqueue_hrtimer(timer, new_base);
        if (!leftmost)
                goto unlock;
@@ -1112,6 +1079,18 @@ u64 hrtimer_get_next_event(void)
 }
 #endif
 
+static inline int hrtimer_clockid_to_base(clockid_t clock_id)
+{
+       if (likely(clock_id < MAX_CLOCKS)) {
+               int base = hrtimer_clock_to_base_table[clock_id];
+
+               if (likely(base != HRTIMER_MAX_CLOCK_BASES))
+                       return base;
+       }
+       WARN(1, "Invalid clockid %d. Using MONOTONIC\n", clock_id);
+       return HRTIMER_BASE_MONOTONIC;
+}
+
 static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
                           enum hrtimer_mode mode)
 {
@@ -1128,12 +1107,6 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
        base = hrtimer_clockid_to_base(clock_id);
        timer->base = &cpu_base->clock_base[base];
        timerqueue_init(&timer->node);
-
-#ifdef CONFIG_TIMER_STATS
-       timer->start_site = NULL;
-       timer->start_pid = -1;
-       memset(timer->start_comm, 0, TASK_COMM_LEN);
-#endif
 }
 
 /**
@@ -1217,7 +1190,6 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
        raw_write_seqcount_barrier(&cpu_base->seq);
 
        __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
-       timer_stats_account_hrtimer(timer);
        fn = timer->function;
 
        /*
index 8c89143f9ebf19fb96ac47b9c22be08fbafdc583..a95f13c314645f9591d70942c74aeaeaa3de08de 100644 (file)
@@ -45,16 +45,16 @@ static struct timeval itimer_get_remtime(struct hrtimer *timer)
 static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
                           struct itimerval *const value)
 {
-       cputime_t cval, cinterval;
+       u64 val, interval;
        struct cpu_itimer *it = &tsk->signal->it[clock_id];
 
        spin_lock_irq(&tsk->sighand->siglock);
 
-       cval = it->expires;
-       cinterval = it->incr;
-       if (cval) {
+       val = it->expires;
+       interval = it->incr;
+       if (val) {
                struct task_cputime cputime;
-               cputime_t t;
+               u64 t;
 
                thread_group_cputimer(tsk, &cputime);
                if (clock_id == CPUCLOCK_PROF)
@@ -63,17 +63,17 @@ static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
                        /* CPUCLOCK_VIRT */
                        t = cputime.utime;
 
-               if (cval < t)
+               if (val < t)
                        /* about to fire */
-                       cval = cputime_one_jiffy;
+                       val = TICK_NSEC;
                else
-                       cval = cval - t;
+                       val -= t;
        }
 
        spin_unlock_irq(&tsk->sighand->siglock);
 
-       cputime_to_timeval(cval, &value->it_value);
-       cputime_to_timeval(cinterval, &value->it_interval);
+       value->it_value = ns_to_timeval(val);
+       value->it_interval = ns_to_timeval(interval);
 }
 
 int do_getitimer(int which, struct itimerval *value)
@@ -129,55 +129,35 @@ enum hrtimer_restart it_real_fn(struct hrtimer *timer)
        return HRTIMER_NORESTART;
 }
 
-static inline u32 cputime_sub_ns(cputime_t ct, s64 real_ns)
-{
-       struct timespec ts;
-       s64 cpu_ns;
-
-       cputime_to_timespec(ct, &ts);
-       cpu_ns = timespec_to_ns(&ts);
-
-       return (cpu_ns <= real_ns) ? 0 : cpu_ns - real_ns;
-}
-
 static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
                           const struct itimerval *const value,
                           struct itimerval *const ovalue)
 {
-       cputime_t cval, nval, cinterval, ninterval;
-       s64 ns_ninterval, ns_nval;
-       u32 error, incr_error;
+       u64 oval, nval, ointerval, ninterval;
        struct cpu_itimer *it = &tsk->signal->it[clock_id];
 
-       nval = timeval_to_cputime(&value->it_value);
-       ns_nval = timeval_to_ns(&value->it_value);
-       ninterval = timeval_to_cputime(&value->it_interval);
-       ns_ninterval = timeval_to_ns(&value->it_interval);
-
-       error = cputime_sub_ns(nval, ns_nval);
-       incr_error = cputime_sub_ns(ninterval, ns_ninterval);
+       nval = timeval_to_ns(&value->it_value);
+       ninterval = timeval_to_ns(&value->it_interval);
 
        spin_lock_irq(&tsk->sighand->siglock);
 
-       cval = it->expires;
-       cinterval = it->incr;
-       if (cval || nval) {
+       oval = it->expires;
+       ointerval = it->incr;
+       if (oval || nval) {
                if (nval > 0)
-                       nval += cputime_one_jiffy;
-               set_process_cpu_timer(tsk, clock_id, &nval, &cval);
+                       nval += TICK_NSEC;
+               set_process_cpu_timer(tsk, clock_id, &nval, &oval);
        }
        it->expires = nval;
        it->incr = ninterval;
-       it->error = error;
-       it->incr_error = incr_error;
        trace_itimer_state(clock_id == CPUCLOCK_VIRT ?
                           ITIMER_VIRTUAL : ITIMER_PROF, value, nval);
 
        spin_unlock_irq(&tsk->sighand->siglock);
 
        if (ovalue) {
-               cputime_to_timeval(cval, &ovalue->it_value);
-               cputime_to_timeval(cinterval, &ovalue->it_interval);
+               ovalue->it_value = ns_to_timeval(oval);
+               ovalue->it_interval = ns_to_timeval(ointerval);
        }
 }
 
index a4a0e478e44d16208d4333248995bbac610fc75b..7906b3f0c41a1a5b662c703c428a892ad7816670 100644 (file)
 
 #include "timekeeping.h"
 
-/* The Jiffies based clocksource is the lowest common
- * denominator clock source which should function on
- * all systems. It has the same coarse resolution as
- * the timer interrupt frequency HZ and it suffers
- * inaccuracies caused by missed or lost timer
- * interrupts and the inability for the timer
- * interrupt hardware to accuratly tick at the
- * requested HZ value. It is also not recommended
- * for "tick-less" systems.
- */
-#define NSEC_PER_JIFFY ((NSEC_PER_SEC+HZ/2)/HZ)
 
-/* Since jiffies uses a simple NSEC_PER_JIFFY multiplier
+/* Since jiffies uses a simple TICK_NSEC multiplier
  * conversion, the .shift value could be zero. However
  * this would make NTP adjustments impossible as they are
  * in units of 1/2^.shift. Thus we use JIFFIES_SHIFT to
@@ -47,8 +36,8 @@
  * amount, and give ntp adjustments in units of 1/2^8
  *
  * The value 8 is somewhat carefully chosen, as anything
- * larger can result in overflows. NSEC_PER_JIFFY grows as
- * HZ shrinks, so values greater than 8 overflow 32bits when
+ * larger can result in overflows. TICK_NSEC grows as HZ
+ * shrinks, so values greater than 8 overflow 32bits when
  * HZ=100.
  */
 #if HZ < 34
@@ -64,12 +53,23 @@ static u64 jiffies_read(struct clocksource *cs)
        return (u64) jiffies;
 }
 
+/*
+ * The Jiffies based clocksource is the lowest common
+ * denominator clock source which should function on
+ * all systems. It has the same coarse resolution as
+ * the timer interrupt frequency HZ and it suffers
+ * inaccuracies caused by missed or lost timer
+ * interrupts and the inability for the timer
 * interrupt hardware to accurately tick at the
+ * requested HZ value. It is also not recommended
+ * for "tick-less" systems.
+ */
 static struct clocksource clocksource_jiffies = {
        .name           = "jiffies",
        .rating         = 1, /* lowest valid rating*/
        .read           = jiffies_read,
        .mask           = CLOCKSOURCE_MASK(32),
-       .mult           = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */
+       .mult           = TICK_NSEC << JIFFIES_SHIFT, /* details above */
        .shift          = JIFFIES_SHIFT,
        .max_cycles     = 10,
 };
@@ -125,7 +125,7 @@ int register_refined_jiffies(long cycles_per_second)
        shift_hz += cycles_per_tick/2;
        do_div(shift_hz, cycles_per_tick);
        /* Calculate nsec_per_tick using shift_hz */
-       nsec_per_tick = (u64)NSEC_PER_SEC << 8;
+       nsec_per_tick = (u64)TICK_NSEC << 8;
        nsec_per_tick += (u32)shift_hz/2;
        do_div(nsec_per_tick, (u32)shift_hz);
 
index e9e8c10f0d9abc351ce78025c3719a98b64c9e53..b4377a5e42694873867fc1b91ef93a48479b9180 100644 (file)
  */
 void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
 {
-       cputime_t cputime = secs_to_cputime(rlim_new);
+       u64 nsecs = rlim_new * NSEC_PER_SEC;
 
        spin_lock_irq(&task->sighand->siglock);
-       set_process_cpu_timer(task, CPUCLOCK_PROF, &cputime, NULL);
+       set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
        spin_unlock_irq(&task->sighand->siglock);
 }
 
@@ -50,39 +50,14 @@ static int check_clock(const clockid_t which_clock)
        return error;
 }
 
-static inline unsigned long long
-timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
-{
-       unsigned long long ret;
-
-       ret = 0;                /* high half always zero when .cpu used */
-       if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
-               ret = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
-       } else {
-               ret = cputime_to_expires(timespec_to_cputime(tp));
-       }
-       return ret;
-}
-
-static void sample_to_timespec(const clockid_t which_clock,
-                              unsigned long long expires,
-                              struct timespec *tp)
-{
-       if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
-               *tp = ns_to_timespec(expires);
-       else
-               cputime_to_timespec((__force cputime_t)expires, tp);
-}
-
 /*
  * Update expiry time from increment, and increase overrun count,
  * given the current clock sample.
  */
-static void bump_cpu_timer(struct k_itimer *timer,
-                          unsigned long long now)
+static void bump_cpu_timer(struct k_itimer *timer, u64 now)
 {
        int i;
-       unsigned long long delta, incr;
+       u64 delta, incr;
 
        if (timer->it.cpu.incr == 0)
                return;
@@ -122,21 +97,21 @@ static inline int task_cputime_zero(const struct task_cputime *cputime)
        return 0;
 }
 
-static inline unsigned long long prof_ticks(struct task_struct *p)
+static inline u64 prof_ticks(struct task_struct *p)
 {
-       cputime_t utime, stime;
+       u64 utime, stime;
 
        task_cputime(p, &utime, &stime);
 
-       return cputime_to_expires(utime + stime);
+       return utime + stime;
 }
-static inline unsigned long long virt_ticks(struct task_struct *p)
+static inline u64 virt_ticks(struct task_struct *p)
 {
-       cputime_t utime, stime;
+       u64 utime, stime;
 
        task_cputime(p, &utime, &stime);
 
-       return cputime_to_expires(utime);
+       return utime;
 }
 
 static int
@@ -176,8 +151,8 @@ posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
 /*
  * Sample a per-thread clock for the given task.
  */
-static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
-                           unsigned long long *sample)
+static int cpu_clock_sample(const clockid_t which_clock,
+                           struct task_struct *p, u64 *sample)
 {
        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
@@ -260,7 +235,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
  */
 static int cpu_clock_sample_group(const clockid_t which_clock,
                                  struct task_struct *p,
-                                 unsigned long long *sample)
+                                 u64 *sample)
 {
        struct task_cputime cputime;
 
@@ -269,11 +244,11 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
                return -EINVAL;
        case CPUCLOCK_PROF:
                thread_group_cputime(p, &cputime);
-               *sample = cputime_to_expires(cputime.utime + cputime.stime);
+               *sample = cputime.utime + cputime.stime;
                break;
        case CPUCLOCK_VIRT:
                thread_group_cputime(p, &cputime);
-               *sample = cputime_to_expires(cputime.utime);
+               *sample = cputime.utime;
                break;
        case CPUCLOCK_SCHED:
                thread_group_cputime(p, &cputime);
@@ -288,7 +263,7 @@ static int posix_cpu_clock_get_task(struct task_struct *tsk,
                                    struct timespec *tp)
 {
        int err = -EINVAL;
-       unsigned long long rtn;
+       u64 rtn;
 
        if (CPUCLOCK_PERTHREAD(which_clock)) {
                if (same_thread_group(tsk, current))
@@ -299,7 +274,7 @@ static int posix_cpu_clock_get_task(struct task_struct *tsk,
        }
 
        if (!err)
-               sample_to_timespec(which_clock, rtn, tp);
+               *tp = ns_to_timespec(rtn);
 
        return err;
 }
@@ -453,7 +428,7 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk)
        cleanup_timers(tsk->signal->cpu_timers);
 }
 
-static inline int expires_gt(cputime_t expires, cputime_t new_exp)
+static inline int expires_gt(u64 expires, u64 new_exp)
 {
        return expires == 0 || expires > new_exp;
 }
@@ -488,7 +463,7 @@ static void arm_timer(struct k_itimer *timer)
        list_add(&nt->entry, listpos);
 
        if (listpos == head) {
-               unsigned long long exp = nt->expires;
+               u64 exp = nt->expires;
 
                /*
                 * We are the new earliest-expiring POSIX 1.b timer, hence
@@ -499,16 +474,15 @@ static void arm_timer(struct k_itimer *timer)
 
                switch (CPUCLOCK_WHICH(timer->it_clock)) {
                case CPUCLOCK_PROF:
-                       if (expires_gt(cputime_expires->prof_exp, expires_to_cputime(exp)))
-                               cputime_expires->prof_exp = expires_to_cputime(exp);
+                       if (expires_gt(cputime_expires->prof_exp, exp))
+                               cputime_expires->prof_exp = exp;
                        break;
                case CPUCLOCK_VIRT:
-                       if (expires_gt(cputime_expires->virt_exp, expires_to_cputime(exp)))
-                               cputime_expires->virt_exp = expires_to_cputime(exp);
+                       if (expires_gt(cputime_expires->virt_exp, exp))
+                               cputime_expires->virt_exp = exp;
                        break;
                case CPUCLOCK_SCHED:
-                       if (cputime_expires->sched_exp == 0 ||
-                           cputime_expires->sched_exp > exp)
+                       if (expires_gt(cputime_expires->sched_exp, exp))
                                cputime_expires->sched_exp = exp;
                        break;
                }
@@ -559,8 +533,7 @@ static void cpu_timer_fire(struct k_itimer *timer)
  * traversal.
  */
 static int cpu_timer_sample_group(const clockid_t which_clock,
-                                 struct task_struct *p,
-                                 unsigned long long *sample)
+                                 struct task_struct *p, u64 *sample)
 {
        struct task_cputime cputime;
 
@@ -569,10 +542,10 @@ static int cpu_timer_sample_group(const clockid_t which_clock,
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
-               *sample = cputime_to_expires(cputime.utime + cputime.stime);
+               *sample = cputime.utime + cputime.stime;
                break;
        case CPUCLOCK_VIRT:
-               *sample = cputime_to_expires(cputime.utime);
+               *sample = cputime.utime;
                break;
        case CPUCLOCK_SCHED:
                *sample = cputime.sum_exec_runtime;
@@ -593,12 +566,12 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
        unsigned long flags;
        struct sighand_struct *sighand;
        struct task_struct *p = timer->it.cpu.task;
-       unsigned long long old_expires, new_expires, old_incr, val;
+       u64 old_expires, new_expires, old_incr, val;
        int ret;
 
        WARN_ON_ONCE(p == NULL);
 
-       new_expires = timespec_to_sample(timer->it_clock, &new->it_value);
+       new_expires = timespec_to_ns(&new->it_value);
 
        /*
         * Protect against sighand release/switch in exit/exec and p->cpu_timers
@@ -659,9 +632,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
                        bump_cpu_timer(timer, val);
                        if (val < timer->it.cpu.expires) {
                                old_expires = timer->it.cpu.expires - val;
-                               sample_to_timespec(timer->it_clock,
-                                                  old_expires,
-                                                  &old->it_value);
+                               old->it_value = ns_to_timespec(old_expires);
                        } else {
                                old->it_value.tv_nsec = 1;
                                old->it_value.tv_sec = 0;
@@ -699,8 +670,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
         * Install the new reload setting, and
         * set up the signal and overrun bookkeeping.
         */
-       timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
-                                               &new->it_interval);
+       timer->it.cpu.incr = timespec_to_ns(&new->it_interval);
 
        /*
         * This acts as a modification timestamp for the timer,
@@ -723,17 +693,15 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
 
        ret = 0;
  out:
-       if (old) {
-               sample_to_timespec(timer->it_clock,
-                                  old_incr, &old->it_interval);
-       }
+       if (old)
+               old->it_interval = ns_to_timespec(old_incr);
 
        return ret;
 }
 
 static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 {
-       unsigned long long now;
+       u64 now;
        struct task_struct *p = timer->it.cpu.task;
 
        WARN_ON_ONCE(p == NULL);
@@ -741,8 +709,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
        /*
         * Easy part: convert the reload time.
         */
-       sample_to_timespec(timer->it_clock,
-                          timer->it.cpu.incr, &itp->it_interval);
+       itp->it_interval = ns_to_timespec(timer->it.cpu.incr);
 
        if (timer->it.cpu.expires == 0) {       /* Timer not armed at all.  */
                itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
@@ -771,8 +738,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
                         * Call the timer disarmed, nothing else to do.
                         */
                        timer->it.cpu.expires = 0;
-                       sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
-                                          &itp->it_value);
+                       itp->it_value = ns_to_timespec(timer->it.cpu.expires);
                        return;
                } else {
                        cpu_timer_sample_group(timer->it_clock, p, &now);
@@ -781,9 +747,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
        }
 
        if (now < timer->it.cpu.expires) {
-               sample_to_timespec(timer->it_clock,
-                                  timer->it.cpu.expires - now,
-                                  &itp->it_value);
+               itp->it_value = ns_to_timespec(timer->it.cpu.expires - now);
        } else {
                /*
                 * The timer should have expired already, but the firing
@@ -827,7 +791,7 @@ static void check_thread_timers(struct task_struct *tsk,
        struct list_head *timers = tsk->cpu_timers;
        struct signal_struct *const sig = tsk->signal;
        struct task_cputime *tsk_expires = &tsk->cputime_expires;
-       unsigned long long expires;
+       u64 expires;
        unsigned long soft;
 
        /*
@@ -838,10 +802,10 @@ static void check_thread_timers(struct task_struct *tsk,
                return;
 
        expires = check_timers_list(timers, firing, prof_ticks(tsk));
-       tsk_expires->prof_exp = expires_to_cputime(expires);
+       tsk_expires->prof_exp = expires;
 
        expires = check_timers_list(++timers, firing, virt_ticks(tsk));
-       tsk_expires->virt_exp = expires_to_cputime(expires);
+       tsk_expires->virt_exp = expires;
 
        tsk_expires->sched_exp = check_timers_list(++timers, firing,
                                                   tsk->se.sum_exec_runtime);
@@ -890,26 +854,17 @@ static inline void stop_process_timers(struct signal_struct *sig)
        tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
 }
 
-static u32 onecputick;
-
 static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
-                            unsigned long long *expires,
-                            unsigned long long cur_time, int signo)
+                            u64 *expires, u64 cur_time, int signo)
 {
        if (!it->expires)
                return;
 
        if (cur_time >= it->expires) {
-               if (it->incr) {
+               if (it->incr)
                        it->expires += it->incr;
-                       it->error += it->incr_error;
-                       if (it->error >= onecputick) {
-                               it->expires -= cputime_one_jiffy;
-                               it->error -= onecputick;
-                       }
-               } else {
+               else
                        it->expires = 0;
-               }
 
                trace_itimer_expire(signo == SIGPROF ?
                                    ITIMER_PROF : ITIMER_VIRTUAL,
@@ -917,9 +872,8 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
                __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
        }
 
-       if (it->expires && (!*expires || it->expires < *expires)) {
+       if (it->expires && (!*expires || it->expires < *expires))
                *expires = it->expires;
-       }
 }
 
 /*
@@ -931,8 +885,8 @@ static void check_process_timers(struct task_struct *tsk,
                                 struct list_head *firing)
 {
        struct signal_struct *const sig = tsk->signal;
-       unsigned long long utime, ptime, virt_expires, prof_expires;
-       unsigned long long sum_sched_runtime, sched_expires;
+       u64 utime, ptime, virt_expires, prof_expires;
+       u64 sum_sched_runtime, sched_expires;
        struct list_head *timers = sig->cpu_timers;
        struct task_cputime cputime;
        unsigned long soft;
@@ -954,8 +908,8 @@ static void check_process_timers(struct task_struct *tsk,
         * Collect the current process totals.
         */
        thread_group_cputimer(tsk, &cputime);
-       utime = cputime_to_expires(cputime.utime);
-       ptime = utime + cputime_to_expires(cputime.stime);
+       utime = cputime.utime;
+       ptime = utime + cputime.stime;
        sum_sched_runtime = cputime.sum_exec_runtime;
 
        prof_expires = check_timers_list(timers, firing, ptime);
@@ -971,10 +925,10 @@ static void check_process_timers(struct task_struct *tsk,
                         SIGVTALRM);
        soft = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
        if (soft != RLIM_INFINITY) {
-               unsigned long psecs = cputime_to_secs(ptime);
+               unsigned long psecs = div_u64(ptime, NSEC_PER_SEC);
                unsigned long hard =
                        READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
-               cputime_t x;
+               u64 x;
                if (psecs >= hard) {
                        /*
                         * At the hard limit, we just die.
@@ -993,14 +947,13 @@ static void check_process_timers(struct task_struct *tsk,
                                sig->rlim[RLIMIT_CPU].rlim_cur = soft;
                        }
                }
-               x = secs_to_cputime(soft);
-               if (!prof_expires || x < prof_expires) {
+               x = soft * NSEC_PER_SEC;
+               if (!prof_expires || x < prof_expires)
                        prof_expires = x;
-               }
        }
 
-       sig->cputime_expires.prof_exp = expires_to_cputime(prof_expires);
-       sig->cputime_expires.virt_exp = expires_to_cputime(virt_expires);
+       sig->cputime_expires.prof_exp = prof_expires;
+       sig->cputime_expires.virt_exp = virt_expires;
        sig->cputime_expires.sched_exp = sched_expires;
        if (task_cputime_zero(&sig->cputime_expires))
                stop_process_timers(sig);
@@ -1017,7 +970,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
        struct sighand_struct *sighand;
        unsigned long flags;
        struct task_struct *p = timer->it.cpu.task;
-       unsigned long long now;
+       u64 now;
 
        WARN_ON_ONCE(p == NULL);
 
@@ -1214,9 +1167,9 @@ void run_posix_cpu_timers(struct task_struct *tsk)
  * The tsk->sighand->siglock must be held by the caller.
  */
 void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
-                          cputime_t *newval, cputime_t *oldval)
+                          u64 *newval, u64 *oldval)
 {
-       unsigned long long now;
+       u64 now;
 
        WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
        cpu_timer_sample_group(clock_idx, tsk, &now);
@@ -1230,7 +1183,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
                if (*oldval) {
                        if (*oldval <= now) {
                                /* Just about to fire. */
-                               *oldval = cputime_one_jiffy;
+                               *oldval = TICK_NSEC;
                        } else {
                                *oldval -= now;
                        }
@@ -1310,7 +1263,7 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
                /*
                 * We were interrupted by a signal.
                 */
-               sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
+               *rqtp = ns_to_timespec(timer.it.cpu.expires);
                error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
                if (!error) {
                        /*
@@ -1476,15 +1429,10 @@ static __init int init_posix_cpu_timers(void)
                .clock_get      = thread_cpu_clock_get,
                .timer_create   = thread_cpu_timer_create,
        };
-       struct timespec ts;
 
        posix_timers_register_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
        posix_timers_register_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
 
-       cputime_to_timespec(cputime_one_jiffy, &ts);
-       onecputick = ts.tv_nsec;
-       WARN_ON(ts.tv_sec != 0);
-
        return 0;
 }
 __initcall(init_posix_cpu_timers);
index 3109204c87cca83704d10326f8b179671577f3d6..987e496bb51a9cc84c92bedc62cf8d69e8e85668 100644 (file)
  */
 
 static struct tick_device tick_broadcast_device;
-static cpumask_var_t tick_broadcast_mask;
-static cpumask_var_t tick_broadcast_on;
-static cpumask_var_t tmpmask;
-static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
+static cpumask_var_t tick_broadcast_mask __cpumask_var_read_mostly;
+static cpumask_var_t tick_broadcast_on __cpumask_var_read_mostly;
+static cpumask_var_t tmpmask __cpumask_var_read_mostly;
 static int tick_broadcast_forced;
 
+static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
+
 #ifdef CONFIG_TICK_ONESHOT
 static void tick_broadcast_clear_oneshot(int cpu);
 static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
@@ -347,17 +348,16 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
  *
  * Called when the system enters a state where affected tick devices
  * might stop. Note: TICK_BROADCAST_FORCE cannot be undone.
- *
- * Called with interrupts disabled, so clockevents_lock is not
- * required here because the local clock event device cannot go away
- * under us.
  */
 void tick_broadcast_control(enum tick_broadcast_mode mode)
 {
        struct clock_event_device *bc, *dev;
        struct tick_device *td;
        int cpu, bc_stopped;
+       unsigned long flags;
 
+       /* Protects also the local clockevent device. */
+       raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
        td = this_cpu_ptr(&tick_cpu_device);
        dev = td->evtdev;
 
@@ -365,12 +365,11 @@ void tick_broadcast_control(enum tick_broadcast_mode mode)
         * Is the device not affected by the powerstate ?
         */
        if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
-               return;
+               goto out;
 
        if (!tick_device_is_functional(dev))
-               return;
+               goto out;
 
-       raw_spin_lock(&tick_broadcast_lock);
        cpu = smp_processor_id();
        bc = tick_broadcast_device.evtdev;
        bc_stopped = cpumask_empty(tick_broadcast_mask);
@@ -420,7 +419,8 @@ void tick_broadcast_control(enum tick_broadcast_mode mode)
                                tick_broadcast_setup_oneshot(bc);
                }
        }
-       raw_spin_unlock(&tick_broadcast_lock);
+out:
+       raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 EXPORT_SYMBOL_GPL(tick_broadcast_control);
 
@@ -517,9 +517,9 @@ void tick_resume_broadcast(void)
 
 #ifdef CONFIG_TICK_ONESHOT
 
-static cpumask_var_t tick_broadcast_oneshot_mask;
-static cpumask_var_t tick_broadcast_pending_mask;
-static cpumask_var_t tick_broadcast_force_mask;
+static cpumask_var_t tick_broadcast_oneshot_mask __cpumask_var_read_mostly;
+static cpumask_var_t tick_broadcast_pending_mask __cpumask_var_read_mostly;
+static cpumask_var_t tick_broadcast_force_mask __cpumask_var_read_mostly;
 
 /*
  * Exposed for debugging: see timer_list.c
index 74e0388cc88d4d17b340d102ec8e053257d6ef55..2c115fdab39765f4ecbeaade9fa74008e711d08a 100644 (file)
@@ -767,7 +767,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
        tick = expires;
 
        /* Skip reprogram of event if its not changed */
-       if (ts->tick_stopped && (expires == ts->next_tick))
+       if (ts->tick_stopped && (expires == dev->next_event))
                goto out;
 
        /*
@@ -787,8 +787,6 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
                trace_tick_stop(1, TICK_DEP_MASK_NONE);
        }
 
-       ts->next_tick = tick;
-
        /*
         * If the expiration time == KTIME_MAX, then we simply stop
         * the tick timer.
@@ -804,10 +802,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
        else
                tick_program_event(tick, 1);
 out:
-       /*
-        * Update the estimated sleep length until the next timer
-        * (not only the tick).
-        */
+       /* Update the estimated sleep length */
        ts->sleep_length = ktime_sub(dev->next_event, now);
        return tick;
 }
index 075444e3d48e643549ba5c7fbcc3c792fb90e1db..bf38226e5c17c15e276c2e4e8c5f4fe423e071f3 100644 (file)
@@ -27,7 +27,6 @@ enum tick_nohz_mode {
  *                     timer is modified for nohz sleeps. This is necessary
  *                     to resume the tick timer operation in the timeline
  *                     when the CPU returns from nohz sleep.
- * @next_tick:         Next tick to be fired when in dynticks mode.
  * @tick_stopped:      Indicator that the idle tick has been stopped
  * @idle_jiffies:      jiffies at the entry to idle for idle time accounting
  * @idle_calls:                Total number of idle calls
@@ -45,7 +44,6 @@ struct tick_sched {
        unsigned long                   check_clocks;
        enum tick_nohz_mode             nohz_mode;
        ktime_t                         last_tick;
-       ktime_t                         next_tick;
        int                             inidle;
        int                             tick_stopped;
        unsigned long                   idle_jiffies;
index a3a9a8a029dc73bc0d1df540d7b2919071573bee..25bdd250457192df0a2dfc13e6c9078bb92be0d6 100644 (file)
@@ -702,6 +702,16 @@ u64 nsec_to_clock_t(u64 x)
 #endif
 }
 
+u64 jiffies64_to_nsecs(u64 j)
+{
+#if !(NSEC_PER_SEC % HZ)
+       return (NSEC_PER_SEC / HZ) * j;
+# else
+       return div_u64(j * HZ_TO_NSEC_NUM, HZ_TO_NSEC_DEN);
+#endif
+}
+EXPORT_SYMBOL(jiffies64_to_nsecs);
+
 /**
  * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64
  *
index c48688904f9fec9dc033239e28c0bdcbc761e59d..f83bbb81600b01c489249bcd2cccad233b6d95ae 100644 (file)
@@ -98,6 +98,12 @@ define timeconst(hz) {
                print "#define HZ_TO_USEC_DEN\t\t", hz/cd, "\n"
                print "#define USEC_TO_HZ_NUM\t\t", hz/cd, "\n"
                print "#define USEC_TO_HZ_DEN\t\t", 1000000/cd, "\n"
+
+               cd=gcd(hz,1000000000)
+               print "#define HZ_TO_NSEC_NUM\t\t", 1000000000/cd, "\n"
+               print "#define HZ_TO_NSEC_DEN\t\t", hz/cd, "\n"
+               print "#define NSEC_TO_HZ_NUM\t\t", hz/cd, "\n"
+               print "#define NSEC_TO_HZ_DEN\t\t", 1000000000/cd, "\n"
                print "\n"
 
                print "#endif /* KERNEL_TIMECONST_H */\n"
index db087d7e106d25f8c542f3252443d260d5618169..95b258dd75dbb152d7eea3043a0f71d6a61617e5 100644 (file)
@@ -1275,27 +1275,8 @@ error: /* even if we error out, we forwarded the time, so call update */
 }
 EXPORT_SYMBOL(timekeeping_inject_offset);
 
-
-/**
- * timekeeping_get_tai_offset - Returns current TAI offset from UTC
- *
- */
-s32 timekeeping_get_tai_offset(void)
-{
-       struct timekeeper *tk = &tk_core.timekeeper;
-       unsigned int seq;
-       s32 ret;
-
-       do {
-               seq = read_seqcount_begin(&tk_core.seq);
-               ret = tk->tai_offset;
-       } while (read_seqcount_retry(&tk_core.seq, seq));
-
-       return ret;
-}
-
 /**
- * __timekeeping_set_tai_offset - Lock free worker function
+ * __timekeeping_set_tai_offset - Sets the TAI offset from UTC and monotonic
  *
  */
 static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
@@ -1304,24 +1285,6 @@ static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
        tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
 }
 
-/**
- * timekeeping_set_tai_offset - Sets the current TAI offset from UTC
- *
- */
-void timekeeping_set_tai_offset(s32 tai_offset)
-{
-       struct timekeeper *tk = &tk_core.timekeeper;
-       unsigned long flags;
-
-       raw_spin_lock_irqsave(&timekeeper_lock, flags);
-       write_seqcount_begin(&tk_core.seq);
-       __timekeeping_set_tai_offset(tk, tai_offset);
-       timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
-       write_seqcount_end(&tk_core.seq);
-       raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
-       clock_was_set();
-}
-
 /**
  * change_clocksource - Swaps clocksources if a new one is available
  *
index 704f595ce83f03090f3f6d63a4d792703b4084a4..d0914676d4c5b28b2d1828deb0da2361dc9bee34 100644 (file)
@@ -11,8 +11,6 @@ extern ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq,
 extern int timekeeping_valid_for_hres(void);
 extern u64 timekeeping_max_deferment(void);
 extern int timekeeping_inject_offset(struct timespec *ts);
-extern s32 timekeeping_get_tai_offset(void);
-extern void timekeeping_set_tai_offset(s32 tai_offset);
 extern int timekeeping_suspend(void);
 extern void timekeeping_resume(void);
 
index ca9fb800336b2904e4c19069ad8c9e29ea926830..38bc4d2208e80bb398f411c2eafb3d2c599f1bc7 100644 (file)
@@ -75,7 +75,7 @@ void tk_debug_account_sleep_time(struct timespec64 *t)
        int bin = min(fls(t->tv_sec), NUM_BINS-1);
 
        sleep_time_bin[bin]++;
-       pr_info("Suspended for %lld.%03lu seconds\n", (s64)t->tv_sec,
-                       t->tv_nsec / NSEC_PER_MSEC);
+       printk_deferred(KERN_INFO "Suspended for %lld.%03lu seconds\n",
+                       (s64)t->tv_sec, t->tv_nsec / NSEC_PER_MSEC);
 }
 
index ec33a6933eaed5938b95e381c2eeabb8d02dd195..82a6bfa0c30789dd5e09f93da1ca88d74fe466ad 100644 (file)
@@ -571,38 +571,6 @@ internal_add_timer(struct timer_base *base, struct timer_list *timer)
        trigger_dyntick_cpu(base, timer);
 }
 
-#ifdef CONFIG_TIMER_STATS
-void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
-{
-       if (timer->start_site)
-               return;
-
-       timer->start_site = addr;
-       memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
-       timer->start_pid = current->pid;
-}
-
-static void timer_stats_account_timer(struct timer_list *timer)
-{
-       void *site;
-
-       /*
-        * start_site can be concurrently reset by
-        * timer_stats_timer_clear_start_info()
-        */
-       site = READ_ONCE(timer->start_site);
-       if (likely(!site))
-               return;
-
-       timer_stats_update_stats(timer, timer->start_pid, site,
-                                timer->function, timer->start_comm,
-                                timer->flags);
-}
-
-#else
-static void timer_stats_account_timer(struct timer_list *timer) {}
-#endif
-
 #ifdef CONFIG_DEBUG_OBJECTS_TIMERS
 
 static struct debug_obj_descr timer_debug_descr;
@@ -789,11 +757,6 @@ static void do_init_timer(struct timer_list *timer, unsigned int flags,
 {
        timer->entry.pprev = NULL;
        timer->flags = flags | raw_smp_processor_id();
-#ifdef CONFIG_TIMER_STATS
-       timer->start_site = NULL;
-       timer->start_pid = -1;
-       memset(timer->start_comm, 0, TASK_COMM_LEN);
-#endif
        lockdep_init_map(&timer->lockdep_map, name, key, 0);
 }
 
@@ -1001,8 +964,6 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
                base = lock_timer_base(timer, &flags);
        }
 
-       timer_stats_timer_set_start_info(timer);
-
        ret = detach_if_pending(timer, base, false);
        if (!ret && pending_only)
                goto out_unlock;
@@ -1130,7 +1091,6 @@ void add_timer_on(struct timer_list *timer, int cpu)
        struct timer_base *new_base, *base;
        unsigned long flags;
 
-       timer_stats_timer_set_start_info(timer);
        BUG_ON(timer_pending(timer) || !timer->function);
 
        new_base = get_timer_cpu_base(timer->flags, cpu);
@@ -1176,7 +1136,6 @@ int del_timer(struct timer_list *timer)
 
        debug_assert_init(timer);
 
-       timer_stats_timer_clear_start_info(timer);
        if (timer_pending(timer)) {
                base = lock_timer_base(timer, &flags);
                ret = detach_if_pending(timer, base, true);
@@ -1204,10 +1163,9 @@ int try_to_del_timer_sync(struct timer_list *timer)
 
        base = lock_timer_base(timer, &flags);
 
-       if (base->running_timer != timer) {
-               timer_stats_timer_clear_start_info(timer);
+       if (base->running_timer != timer)
                ret = detach_if_pending(timer, base, true);
-       }
+
        spin_unlock_irqrestore(&base->lock, flags);
 
        return ret;
@@ -1331,7 +1289,6 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
                unsigned long data;
 
                timer = hlist_entry(head->first, struct timer_list, entry);
-               timer_stats_account_timer(timer);
 
                base->running_timer = timer;
                detach_timer(timer, true);
@@ -1868,7 +1825,6 @@ static void __init init_timer_cpus(void)
 void __init init_timers(void)
 {
        init_timer_cpus();
-       init_timer_stats();
        open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
 }
 
index afe6cd1944fc5df3e4755f961719889bf0d72532..ff8d5c13d04bd0911c62584b6f2764b7a27cb89a 100644 (file)
@@ -62,21 +62,11 @@ static void
 print_timer(struct seq_file *m, struct hrtimer *taddr, struct hrtimer *timer,
            int idx, u64 now)
 {
-#ifdef CONFIG_TIMER_STATS
-       char tmp[TASK_COMM_LEN + 1];
-#endif
        SEQ_printf(m, " #%d: ", idx);
        print_name_offset(m, taddr);
        SEQ_printf(m, ", ");
        print_name_offset(m, timer->function);
        SEQ_printf(m, ", S:%02x", timer->state);
-#ifdef CONFIG_TIMER_STATS
-       SEQ_printf(m, ", ");
-       print_name_offset(m, timer->start_site);
-       memcpy(tmp, timer->start_comm, TASK_COMM_LEN);
-       tmp[TASK_COMM_LEN] = 0;
-       SEQ_printf(m, ", %s/%d", tmp, timer->start_pid);
-#endif
        SEQ_printf(m, "\n");
        SEQ_printf(m, " # expires at %Lu-%Lu nsecs [in %Ld to %Ld nsecs]\n",
                (unsigned long long)ktime_to_ns(hrtimer_get_softexpires(timer)),
@@ -127,7 +117,7 @@ print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
        SEQ_printf(m, "  .base:       %pK\n", base);
        SEQ_printf(m, "  .index:      %d\n", base->index);
 
-       SEQ_printf(m, "  .resolution: %u nsecs\n", (unsigned) hrtimer_resolution);
+       SEQ_printf(m, "  .resolution: %u nsecs\n", hrtimer_resolution);
 
        SEQ_printf(m,   "  .get_time:   ");
        print_name_offset(m, base->get_time);
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
deleted file mode 100644 (file)
index afddded..0000000
+++ /dev/null
@@ -1,425 +0,0 @@
-/*
- * kernel/time/timer_stats.c
- *
- * Collect timer usage statistics.
- *
- * Copyright(C) 2006, Red Hat, Inc., Ingo Molnar
- * Copyright(C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
- *
- * timer_stats is based on timer_top, a similar functionality which was part of
- * Con Kolivas dyntick patch set. It was developed by Daniel Petrini at the
- * Instituto Nokia de Tecnologia - INdT - Manaus. timer_top's design was based
- * on dynamic allocation of the statistics entries and linear search based
- * lookup combined with a global lock, rather than the static array, hash
- * and per-CPU locking which is used by timer_stats. It was written for the
- * pre hrtimer kernel code and therefore did not take hrtimers into account.
- * Nevertheless it provided the base for the timer_stats implementation and
- * was a helpful source of inspiration. Kudos to Daniel and the Nokia folks
- * for this effort.
- *
- * timer_top.c is
- *     Copyright (C) 2005 Instituto Nokia de Tecnologia - INdT - Manaus
- *     Written by Daniel Petrini <d.pensator@gmail.com>
- *     timer_top.c was released under the GNU General Public License version 2
- *
- * We export the addresses and counting of timer functions being called,
- * the pid and cmdline from the owner process if applicable.
- *
- * Start/stop data collection:
- * # echo [1|0] >/proc/timer_stats
- *
- * Display the information collected so far:
- * # cat /proc/timer_stats
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/proc_fs.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/sched.h>
-#include <linux/seq_file.h>
-#include <linux/kallsyms.h>
-
-#include <linux/uaccess.h>
-
-/*
- * This is our basic unit of interest: a timer expiry event identified
- * by the timer, its start/expire functions and the PID of the task that
- * started the timer. We count the number of times an event happens:
- */
-struct entry {
-       /*
-        * Hash list:
-        */
-       struct entry            *next;
-
-       /*
-        * Hash keys:
-        */
-       void                    *timer;
-       void                    *start_func;
-       void                    *expire_func;
-       pid_t                   pid;
-
-       /*
-        * Number of timeout events:
-        */
-       unsigned long           count;
-       u32                     flags;
-
-       /*
-        * We save the command-line string to preserve
-        * this information past task exit:
-        */
-       char                    comm[TASK_COMM_LEN + 1];
-
-} ____cacheline_aligned_in_smp;
-
-/*
- * Spinlock protecting the tables - not taken during lookup:
- */
-static DEFINE_RAW_SPINLOCK(table_lock);
-
-/*
- * Per-CPU lookup locks for fast hash lookup:
- */
-static DEFINE_PER_CPU(raw_spinlock_t, tstats_lookup_lock);
-
-/*
- * Mutex to serialize state changes with show-stats activities:
- */
-static DEFINE_MUTEX(show_mutex);
-
-/*
- * Collection status, active/inactive:
- */
-int __read_mostly timer_stats_active;
-
-/*
- * Beginning/end timestamps of measurement:
- */
-static ktime_t time_start, time_stop;
-
-/*
- * tstat entry structs only get allocated while collection is
- * active and never freed during that time - this simplifies
- * things quite a bit.
- *
- * They get freed when a new collection period is started.
- */
-#define MAX_ENTRIES_BITS       10
-#define MAX_ENTRIES            (1UL << MAX_ENTRIES_BITS)
-
-static unsigned long nr_entries;
-static struct entry entries[MAX_ENTRIES];
-
-static atomic_t overflow_count;
-
-/*
- * The entries are in a hash-table, for fast lookup:
- */
-#define TSTAT_HASH_BITS                (MAX_ENTRIES_BITS - 1)
-#define TSTAT_HASH_SIZE                (1UL << TSTAT_HASH_BITS)
-#define TSTAT_HASH_MASK                (TSTAT_HASH_SIZE - 1)
-
-#define __tstat_hashfn(entry)                                          \
-       (((unsigned long)(entry)->timer       ^                         \
-         (unsigned long)(entry)->start_func  ^                         \
-         (unsigned long)(entry)->expire_func ^                         \
-         (unsigned long)(entry)->pid           ) & TSTAT_HASH_MASK)
-
-#define tstat_hashentry(entry) (tstat_hash_table + __tstat_hashfn(entry))
-
-static struct entry *tstat_hash_table[TSTAT_HASH_SIZE] __read_mostly;
-
-static void reset_entries(void)
-{
-       nr_entries = 0;
-       memset(entries, 0, sizeof(entries));
-       memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
-       atomic_set(&overflow_count, 0);
-}
-
-static struct entry *alloc_entry(void)
-{
-       if (nr_entries >= MAX_ENTRIES)
-               return NULL;
-
-       return entries + nr_entries++;
-}
-
-static int match_entries(struct entry *entry1, struct entry *entry2)
-{
-       return entry1->timer       == entry2->timer       &&
-              entry1->start_func  == entry2->start_func  &&
-              entry1->expire_func == entry2->expire_func &&
-              entry1->pid         == entry2->pid;
-}
-
-/*
- * Look up whether an entry matching this item is present
- * in the hash already. Must be called with irqs off and the
- * lookup lock held:
- */
-static struct entry *tstat_lookup(struct entry *entry, char *comm)
-{
-       struct entry **head, *curr, *prev;
-
-       head = tstat_hashentry(entry);
-       curr = *head;
-
-       /*
-        * The fastpath is when the entry is already hashed,
-        * we do this with the lookup lock held, but with the
-        * table lock not held:
-        */
-       while (curr) {
-               if (match_entries(curr, entry))
-                       return curr;
-
-               curr = curr->next;
-       }
-       /*
-        * Slowpath: allocate, set up and link a new hash entry:
-        */
-       prev = NULL;
-       curr = *head;
-
-       raw_spin_lock(&table_lock);
-       /*
-        * Make sure we have not raced with another CPU:
-        */
-       while (curr) {
-               if (match_entries(curr, entry))
-                       goto out_unlock;
-
-               prev = curr;
-               curr = curr->next;
-       }
-
-       curr = alloc_entry();
-       if (curr) {
-               *curr = *entry;
-               curr->count = 0;
-               curr->next = NULL;
-               memcpy(curr->comm, comm, TASK_COMM_LEN);
-
-               smp_mb(); /* Ensure that curr is initialized before insert */
-
-               if (prev)
-                       prev->next = curr;
-               else
-                       *head = curr;
-       }
- out_unlock:
-       raw_spin_unlock(&table_lock);
-
-       return curr;
-}
-
-/**
- * timer_stats_update_stats - Update the statistics for a timer.
- * @timer:     pointer to either a timer_list or a hrtimer
- * @pid:       the pid of the task which set up the timer
- * @startf:    pointer to the function which did the timer setup
- * @timerf:    pointer to the timer callback function of the timer
- * @comm:      name of the process which set up the timer
- * @tflags:    The flags field of the timer
- *
- * When the timer is already registered, then the event counter is
- * incremented. Otherwise the timer is registered in a free slot.
- */
-void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
-                             void *timerf, char *comm, u32 tflags)
-{
-       /*
-        * It doesn't matter which lock we take:
-        */
-       raw_spinlock_t *lock;
-       struct entry *entry, input;
-       unsigned long flags;
-
-       if (likely(!timer_stats_active))
-               return;
-
-       lock = &per_cpu(tstats_lookup_lock, raw_smp_processor_id());
-
-       input.timer = timer;
-       input.start_func = startf;
-       input.expire_func = timerf;
-       input.pid = pid;
-       input.flags = tflags;
-
-       raw_spin_lock_irqsave(lock, flags);
-       if (!timer_stats_active)
-               goto out_unlock;
-
-       entry = tstat_lookup(&input, comm);
-       if (likely(entry))
-               entry->count++;
-       else
-               atomic_inc(&overflow_count);
-
- out_unlock:
-       raw_spin_unlock_irqrestore(lock, flags);
-}
-
-static void print_name_offset(struct seq_file *m, unsigned long addr)
-{
-       char symname[KSYM_NAME_LEN];
-
-       if (lookup_symbol_name(addr, symname) < 0)
-               seq_printf(m, "<%p>", (void *)addr);
-       else
-               seq_printf(m, "%s", symname);
-}
-
-static int tstats_show(struct seq_file *m, void *v)
-{
-       struct timespec64 period;
-       struct entry *entry;
-       unsigned long ms;
-       long events = 0;
-       ktime_t time;
-       int i;
-
-       mutex_lock(&show_mutex);
-       /*
-        * If still active then calculate up to now:
-        */
-       if (timer_stats_active)
-               time_stop = ktime_get();
-
-       time = ktime_sub(time_stop, time_start);
-
-       period = ktime_to_timespec64(time);
-       ms = period.tv_nsec / 1000000;
-
-       seq_puts(m, "Timer Stats Version: v0.3\n");
-       seq_printf(m, "Sample period: %ld.%03ld s\n", (long)period.tv_sec, ms);
-       if (atomic_read(&overflow_count))
-               seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
-       seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
-
-       for (i = 0; i < nr_entries; i++) {
-               entry = entries + i;
-               if (entry->flags & TIMER_DEFERRABLE) {
-                       seq_printf(m, "%4luD, %5d %-16s ",
-                               entry->count, entry->pid, entry->comm);
-               } else {
-                       seq_printf(m, " %4lu, %5d %-16s ",
-                               entry->count, entry->pid, entry->comm);
-               }
-
-               print_name_offset(m, (unsigned long)entry->start_func);
-               seq_puts(m, " (");
-               print_name_offset(m, (unsigned long)entry->expire_func);
-               seq_puts(m, ")\n");
-
-               events += entry->count;
-       }
-
-       ms += period.tv_sec * 1000;
-       if (!ms)
-               ms = 1;
-
-       if (events && period.tv_sec)
-               seq_printf(m, "%ld total events, %ld.%03ld events/sec\n",
-                          events, events * 1000 / ms,
-                          (events * 1000000 / ms) % 1000);
-       else
-               seq_printf(m, "%ld total events\n", events);
-
-       mutex_unlock(&show_mutex);
-
-       return 0;
-}
-
-/*
- * After a state change, make sure all concurrent lookup/update
- * activities have stopped:
- */
-static void sync_access(void)
-{
-       unsigned long flags;
-       int cpu;
-
-       for_each_online_cpu(cpu) {
-               raw_spinlock_t *lock = &per_cpu(tstats_lookup_lock, cpu);
-
-               raw_spin_lock_irqsave(lock, flags);
-               /* nothing */
-               raw_spin_unlock_irqrestore(lock, flags);
-       }
-}
-
-static ssize_t tstats_write(struct file *file, const char __user *buf,
-                           size_t count, loff_t *offs)
-{
-       char ctl[2];
-
-       if (count != 2 || *offs)
-               return -EINVAL;
-
-       if (copy_from_user(ctl, buf, count))
-               return -EFAULT;
-
-       mutex_lock(&show_mutex);
-       switch (ctl[0]) {
-       case '0':
-               if (timer_stats_active) {
-                       timer_stats_active = 0;
-                       time_stop = ktime_get();
-                       sync_access();
-               }
-               break;
-       case '1':
-               if (!timer_stats_active) {
-                       reset_entries();
-                       time_start = ktime_get();
-                       smp_mb();
-                       timer_stats_active = 1;
-               }
-               break;
-       default:
-               count = -EINVAL;
-       }
-       mutex_unlock(&show_mutex);
-
-       return count;
-}
-
-static int tstats_open(struct inode *inode, struct file *filp)
-{
-       return single_open(filp, tstats_show, NULL);
-}
-
-static const struct file_operations tstats_fops = {
-       .open           = tstats_open,
-       .read           = seq_read,
-       .write          = tstats_write,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
-
-void __init init_timer_stats(void)
-{
-       int cpu;
-
-       for_each_possible_cpu(cpu)
-               raw_spin_lock_init(&per_cpu(tstats_lookup_lock, cpu));
-}
-
-static int __init init_tstats_procfs(void)
-{
-       struct proc_dir_entry *pe;
-
-       pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
-       if (!pe)
-               return -ENOMEM;
-       return 0;
-}
-__initcall(init_tstats_procfs);
index 775569ec50d03fbf0ca4f755f79d106a63acaf1a..af344a1bf0d0e6270e5e659ffa160753e148cdd9 100644 (file)
@@ -266,7 +266,7 @@ out:
 static struct cpumask save_cpumask;
 static bool disable_migrate;
 
-static void move_to_next_cpu(void)
+static void move_to_next_cpu(bool initmask)
 {
        static struct cpumask *current_mask;
        int next_cpu;
@@ -275,7 +275,7 @@ static void move_to_next_cpu(void)
                return;
 
        /* Just pick the first CPU on first iteration */
-       if (!current_mask) {
+       if (initmask) {
                current_mask = &save_cpumask;
                get_online_cpus();
                cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
@@ -330,10 +330,12 @@ static void move_to_next_cpu(void)
 static int kthread_fn(void *data)
 {
        u64 interval;
+       bool initmask = true;
 
        while (!kthread_should_stop()) {
 
-               move_to_next_cpu();
+               move_to_next_cpu(initmask);
+               initmask = false;
 
                local_irq_disable();
                get_sample();
index a133ecd741e437d938ca377cf5e4358bcb586aa4..7ad9e53ad174bc6cdb0f99490f87e419e9381b02 100644 (file)
@@ -1372,7 +1372,7 @@ kprobe_trace_selftest_target(int a1, int a2, int a3, int a4, int a5, int a6)
        return a1 + a2 + a3 + a4 + a5 + a6;
 }
 
-static struct __init trace_event_file *
+static __init struct trace_event_file *
 find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
 {
        struct trace_event_file *file;
index f8e26ab963ed2fc057db395bee59d6f25307b5d5..5c21f053505655a8723e8fa85bc78ae3e2d281eb 100644 (file)
@@ -31,7 +31,7 @@ void bacct_add_tsk(struct user_namespace *user_ns,
                   struct taskstats *stats, struct task_struct *tsk)
 {
        const struct cred *tcred;
-       cputime_t utime, stime, utimescaled, stimescaled;
+       u64 utime, stime, utimescaled, stimescaled;
        u64 delta;
 
        BUILD_BUG_ON(TS_COMM_LEN < TASK_COMM_LEN);
@@ -67,12 +67,12 @@ void bacct_add_tsk(struct user_namespace *user_ns,
        rcu_read_unlock();
 
        task_cputime(tsk, &utime, &stime);
-       stats->ac_utime = cputime_to_usecs(utime);
-       stats->ac_stime = cputime_to_usecs(stime);
+       stats->ac_utime = div_u64(utime, NSEC_PER_USEC);
+       stats->ac_stime = div_u64(stime, NSEC_PER_USEC);
 
        task_cputime_scaled(tsk, &utimescaled, &stimescaled);
-       stats->ac_utimescaled = cputime_to_usecs(utimescaled);
-       stats->ac_stimescaled = cputime_to_usecs(stimescaled);
+       stats->ac_utimescaled = div_u64(utimescaled, NSEC_PER_USEC);
+       stats->ac_stimescaled = div_u64(stimescaled, NSEC_PER_USEC);
 
        stats->ac_minflt = tsk->min_flt;
        stats->ac_majflt = tsk->maj_flt;
@@ -123,18 +123,15 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
 #undef MB
 
 static void __acct_update_integrals(struct task_struct *tsk,
-                                   cputime_t utime, cputime_t stime)
+                                   u64 utime, u64 stime)
 {
-       cputime_t time, dtime;
-       u64 delta;
+       u64 time, delta;
 
        if (!likely(tsk->mm))
                return;
 
        time = stime + utime;
-       dtime = time - tsk->acct_timexpd;
-       /* Avoid division: cputime_t is often in nanoseconds already. */
-       delta = cputime_to_nsecs(dtime);
+       delta = time - tsk->acct_timexpd;
 
        if (delta < TICK_NSEC)
                return;
@@ -155,7 +152,7 @@ static void __acct_update_integrals(struct task_struct *tsk,
  */
 void acct_update_integrals(struct task_struct *tsk)
 {
-       cputime_t utime, stime;
+       u64 utime, stime;
        unsigned long flags;
 
        local_irq_save(flags);
index 9d20d5dd298af25d0cd95635e217180601703959..95c6336fc2b33c6ea47611e9f9974d8521bc08fc 100644 (file)
@@ -128,10 +128,10 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
        struct hlist_head *hashent = ucounts_hashentry(ns, uid);
        struct ucounts *ucounts, *new;
 
-       spin_lock(&ucounts_lock);
+       spin_lock_irq(&ucounts_lock);
        ucounts = find_ucounts(ns, uid, hashent);
        if (!ucounts) {
-               spin_unlock(&ucounts_lock);
+               spin_unlock_irq(&ucounts_lock);
 
                new = kzalloc(sizeof(*new), GFP_KERNEL);
                if (!new)
@@ -141,7 +141,7 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
                new->uid = uid;
                atomic_set(&new->count, 0);
 
-               spin_lock(&ucounts_lock);
+               spin_lock_irq(&ucounts_lock);
                ucounts = find_ucounts(ns, uid, hashent);
                if (ucounts) {
                        kfree(new);
@@ -152,16 +152,18 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
        }
        if (!atomic_add_unless(&ucounts->count, 1, INT_MAX))
                ucounts = NULL;
-       spin_unlock(&ucounts_lock);
+       spin_unlock_irq(&ucounts_lock);
        return ucounts;
 }
 
 static void put_ucounts(struct ucounts *ucounts)
 {
+       unsigned long flags;
+
        if (atomic_dec_and_test(&ucounts->count)) {
-               spin_lock(&ucounts_lock);
+               spin_lock_irqsave(&ucounts_lock, flags);
                hlist_del_init(&ucounts->node);
-               spin_unlock(&ucounts_lock);
+               spin_unlock_irqrestore(&ucounts_lock, flags);
 
                kfree(ucounts);
        }
@@ -225,11 +227,10 @@ static __init int user_namespace_sysctl_init(void)
         * properly.
         */
        user_header = register_sysctl("user", empty);
+       kmemleak_ignore(user_header);
        BUG_ON(!user_header);
        BUG_ON(!setup_userns_sysctls(&init_user_ns));
 #endif
        return 0;
 }
 subsys_initcall(user_namespace_sysctl_init);
-
-
index d4b0fa01cae39cd720661d7a62f50a7926f9db69..63177be0159e9493f6d6ade90efae743aaf117b7 100644 (file)
@@ -49,6 +49,8 @@ unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
 #define for_each_watchdog_cpu(cpu) \
        for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
 
+atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);
+
 /*
  * The 'watchdog_running' variable is set to 1 when the watchdog threads
  * are registered/started and is set to 0 when the watchdog threads are
@@ -260,6 +262,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
        int duration;
        int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
 
+       if (atomic_read(&watchdog_park_in_progress) != 0)
+               return HRTIMER_NORESTART;
+
        /* kick the hardlockup detector */
        watchdog_interrupt_count();
 
@@ -467,12 +472,16 @@ static int watchdog_park_threads(void)
 {
        int cpu, ret = 0;
 
+       atomic_set(&watchdog_park_in_progress, 1);
+
        for_each_watchdog_cpu(cpu) {
                ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
                if (ret)
                        break;
        }
 
+       atomic_set(&watchdog_park_in_progress, 0);
+
        return ret;
 }
 
index 84016c8aee6b5d2769495a8c6eee0b4ac559b1a6..12b8dd64078655dd9004d03caa8167da16b57cf5 100644 (file)
@@ -84,6 +84,9 @@ static void watchdog_overflow_callback(struct perf_event *event,
        /* Ensure the watchdog never gets throttled */
        event->hw.interrupts = 0;
 
+       if (atomic_read(&watchdog_park_in_progress) != 0)
+               return;
+
        if (__this_cpu_read(watchdog_nmi_touch) == true) {
                __this_cpu_write(watchdog_nmi_touch, false);
                return;
index 1d9fb6543a66a26c3094f8f2d05ab4447782dadd..072cbc9b175dc1efbe95c14858f810f92db12130 100644 (file)
@@ -1523,8 +1523,6 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
                return;
        }
 
-       timer_stats_timer_set_start_info(&dwork->timer);
-
        dwork->wq = wq;
        dwork->cpu = cpu;
        timer->expires = jiffies + delay;
index eb9e9a7870fa7bdb0f373f858037c1026880c85f..acedbe626d4798347312b828d9cbafeba117e7da 100644 (file)
@@ -716,6 +716,19 @@ source "lib/Kconfig.kmemcheck"
 
 source "lib/Kconfig.kasan"
 
+config DEBUG_REFCOUNT
+       bool "Verbose refcount checks"
+       help
+         Say Y here if you want reference counters (refcount_t and kref) to
+         generate WARNs on dubious usage. Without this refcount_t will still
+         be a saturating counter and avoid Use-After-Free by turning it into
+         a resource leak Denial-Of-Service.
+
+         Use of this option will increase kernel text size but will alert the
+         admin of potential abuse.
+
+         If in doubt, say "N".
+
 endmenu # "Memory Debugging"
 
 config ARCH_HAS_KCOV
@@ -980,20 +993,6 @@ config DEBUG_TIMEKEEPING
 
          If unsure, say N.
 
-config TIMER_STATS
-       bool "Collect kernel timers statistics"
-       depends on DEBUG_KERNEL && PROC_FS
-       help
-         If you say Y here, additional code will be inserted into the
-         timer routines to collect statistics about kernel timers being
-         reprogrammed. The statistics can be read from /proc/timer_stats.
-         The statistics collection is started by writing 1 to /proc/timer_stats,
-         writing 0 stops it. This feature is useful to collect information
-         about timer usage patterns in kernel and userspace. This feature
-         is lightweight if enabled in the kernel config but not activated
-         (it defaults to deactivated on bootup and will only be activated
-         if some application like powertop activates it explicitly).
-
 config DEBUG_PREEMPT
        bool "Debug preemptible kernel"
        depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT
@@ -1180,6 +1179,18 @@ config LOCK_TORTURE_TEST
          Say M if you want these torture tests to build as a module.
          Say N if you are unsure.
 
+config WW_MUTEX_SELFTEST
+       tristate "Wait/wound mutex selftests"
+       help
+         This option provides a kernel module that runs tests on the
+         on the struct ww_mutex locking API.
+
+         It is recommended to enable DEBUG_WW_MUTEX_SLOWPATH in conjunction
+         with this test harness.
+
+         Say M if you want these self tests to build as a module.
+         Say N if you are unsure.
+
 endmenu # lock debugging
 
 config TRACE_IRQFLAGS
@@ -1450,6 +1461,7 @@ config RCU_CPU_STALL_TIMEOUT
 config RCU_TRACE
        bool "Enable tracing for RCU"
        depends on DEBUG_KERNEL
+       default y if TREE_RCU
        select TRACE_CLOCK
        help
          This option provides tracing in RCU which presents stats
index 04c1ef717fe0c312d876a5388ae12d74d66d412b..8c28cbd7e104b6b23dd9b87e25aac2f0b6796c44 100644 (file)
@@ -52,9 +52,18 @@ static int                   debug_objects_fixups __read_mostly;
 static int                     debug_objects_warnings __read_mostly;
 static int                     debug_objects_enabled __read_mostly
                                = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
-
+static int                     debug_objects_pool_size __read_mostly
+                               = ODEBUG_POOL_SIZE;
+static int                     debug_objects_pool_min_level __read_mostly
+                               = ODEBUG_POOL_MIN_LEVEL;
 static struct debug_obj_descr  *descr_test  __read_mostly;
 
+/*
+ * Track numbers of kmem_cache_alloc()/free() calls done.
+ */
+static int                     debug_objects_allocated;
+static int                     debug_objects_freed;
+
 static void free_obj_work(struct work_struct *work);
 static DECLARE_WORK(debug_obj_work, free_obj_work);
 
@@ -88,13 +97,13 @@ static void fill_pool(void)
        struct debug_obj *new;
        unsigned long flags;
 
-       if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
+       if (likely(obj_pool_free >= debug_objects_pool_min_level))
                return;
 
        if (unlikely(!obj_cache))
                return;
 
-       while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {
+       while (obj_pool_free < debug_objects_pool_min_level) {
 
                new = kmem_cache_zalloc(obj_cache, gfp);
                if (!new)
@@ -102,6 +111,7 @@ static void fill_pool(void)
 
                raw_spin_lock_irqsave(&pool_lock, flags);
                hlist_add_head(&new->node, &obj_pool);
+               debug_objects_allocated++;
                obj_pool_free++;
                raw_spin_unlock_irqrestore(&pool_lock, flags);
        }
@@ -162,24 +172,39 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 
 /*
  * workqueue function to free objects.
+ *
+ * To reduce contention on the global pool_lock, the actual freeing of
+ * debug objects will be delayed if the pool_lock is busy. We also free
+ * the objects in a batch of 4 for each lock/unlock cycle.
  */
+#define ODEBUG_FREE_BATCH      4
+
 static void free_obj_work(struct work_struct *work)
 {
-       struct debug_obj *obj;
+       struct debug_obj *objs[ODEBUG_FREE_BATCH];
        unsigned long flags;
+       int i;
 
-       raw_spin_lock_irqsave(&pool_lock, flags);
-       while (obj_pool_free > ODEBUG_POOL_SIZE) {
-               obj = hlist_entry(obj_pool.first, typeof(*obj), node);
-               hlist_del(&obj->node);
-               obj_pool_free--;
+       if (!raw_spin_trylock_irqsave(&pool_lock, flags))
+               return;
+       while (obj_pool_free >= debug_objects_pool_size + ODEBUG_FREE_BATCH) {
+               for (i = 0; i < ODEBUG_FREE_BATCH; i++) {
+                       objs[i] = hlist_entry(obj_pool.first,
+                                             typeof(*objs[0]), node);
+                       hlist_del(&objs[i]->node);
+               }
+
+               obj_pool_free -= ODEBUG_FREE_BATCH;
+               debug_objects_freed += ODEBUG_FREE_BATCH;
                /*
                 * We release pool_lock across kmem_cache_free() to
                 * avoid contention on pool_lock.
                 */
                raw_spin_unlock_irqrestore(&pool_lock, flags);
-               kmem_cache_free(obj_cache, obj);
-               raw_spin_lock_irqsave(&pool_lock, flags);
+               for (i = 0; i < ODEBUG_FREE_BATCH; i++)
+                       kmem_cache_free(obj_cache, objs[i]);
+               if (!raw_spin_trylock_irqsave(&pool_lock, flags))
+                       return;
        }
        raw_spin_unlock_irqrestore(&pool_lock, flags);
 }
@@ -198,7 +223,7 @@ static void free_object(struct debug_obj *obj)
         * schedule work when the pool is filled and the cache is
         * initialized:
         */
-       if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
+       if (obj_pool_free > debug_objects_pool_size && obj_cache)
                sched = 1;
        hlist_add_head(&obj->node, &obj_pool);
        obj_pool_free++;
@@ -758,6 +783,8 @@ static int debug_stats_show(struct seq_file *m, void *v)
        seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
        seq_printf(m, "pool_used     :%d\n", obj_pool_used);
        seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
+       seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
+       seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
        return 0;
 }
 
@@ -1116,4 +1143,11 @@ void __init debug_objects_mem_init(void)
                pr_warn("out of memory.\n");
        } else
                debug_objects_selftest();
+
+       /*
+        * Increase the thresholds for allocating and freeing objects
+        * according to the number of possible CPUs available in the system.
+        */
+       debug_objects_pool_size += num_possible_cpus() * 32;
+       debug_objects_pool_min_level += num_possible_cpus() * 4;
 }
index 86c8911b0e3a6fff02b9e52faa11816cfe508362..a3e14ce92a5684a662c2c8f80f97e6fef95943b7 100644 (file)
@@ -144,4 +144,3 @@ int ioremap_page_range(unsigned long addr,
 
        return err;
 }
-EXPORT_SYMBOL_GPL(ioremap_page_range);
index 0b92d605fb69cc805a96c8333dab36174f755e22..84812a9fb16fbbd1409315ea3752fb9a1e3e39ef 100644 (file)
@@ -769,7 +769,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
                        struct radix_tree_node *old = child;
                        offset = child->offset + 1;
                        child = child->parent;
-                       WARN_ON_ONCE(!list_empty(&node->private_list));
+                       WARN_ON_ONCE(!list_empty(&old->private_list));
                        radix_tree_node_free(old);
                        if (old == entry_to_node(node))
                                return;
index adc6ee0a51267ab6691c6244cddbd5da69c059f9..4a720ed4fdafd575df46159a3d51131902d638e4 100644 (file)
@@ -80,8 +80,7 @@ bool timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node)
        if (head->next == node) {
                struct rb_node *rbn = rb_next(&node->node);
 
-               head->next = rbn ?
-                       rb_entry(rbn, struct timerqueue_node, node) : NULL;
+               head->next = rb_entry_safe(rbn, struct timerqueue_node, node);
        }
        rb_erase(&node->node, &head->head);
        RB_CLEAR_NODE(&node->node);
index b772a33ef640ab0d6770bb3d249a6fe6f16eeebc..3f9afded581be1a013bda4db2c0ec3a721323364 100644 (file)
@@ -1791,6 +1791,11 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
 
                cond_resched();
 find_page:
+               if (fatal_signal_pending(current)) {
+                       error = -EINTR;
+                       goto out;
+               }
+
                page = find_get_page(mapping, index);
                if (!page) {
                        page_cache_sync_readahead(mapping,
index 9a6bd6c8d55a6691047e516a46c2cf6b931b912d..5f3ad65c85de01fa6e4c8a07ef9494410bf2b133 100644 (file)
@@ -783,6 +783,12 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
 
        assert_spin_locked(pmd_lockptr(mm, pmd));
 
+       /*
+        * When we COW a devmap PMD entry, we split it into PTEs, so we should
+        * not be in this function with `flags & FOLL_COW` set.
+        */
+       WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
+
        if (flags & FOLL_WRITE && !pmd_write(*pmd))
                return NULL;
 
@@ -1128,6 +1134,16 @@ out_unlock:
        return ret;
 }
 
+/*
+ * FOLL_FORCE can write to even unwritable pmd's, but only
+ * after we've gone through a COW cycle and they are dirty.
+ */
+static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
+{
+       return pmd_write(pmd) ||
+              ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
+}
+
 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                   unsigned long addr,
                                   pmd_t *pmd,
@@ -1138,7 +1154,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 
        assert_spin_locked(pmd_lockptr(mm, pmd));
 
-       if (flags & FOLL_WRITE && !pmd_write(*pmd))
+       if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
                goto out;
 
        /* Avoid dumping huge zero page */
index b82b3e2151574ae1abbb2cd57624114727975410..f479365530b6484bbd5cae42064521fed362961e 100644 (file)
@@ -13,6 +13,7 @@
  *
  */
 
+#include <linux/ftrace.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/printk.h>
@@ -300,6 +301,8 @@ void kasan_report(unsigned long addr, size_t size,
        if (likely(!kasan_report_enabled()))
                return;
 
+       disable_trace_on_warning();
+
        info.access_addr = (void *)addr;
        info.access_size = size;
        info.is_write = is_write;
index a63a8f8326647b92bdc63810c8a93be96047f748..b822e158b319e8f2f02ecbfe76c31b6466be51f1 100644 (file)
@@ -4353,9 +4353,9 @@ static int mem_cgroup_do_precharge(unsigned long count)
                return ret;
        }
 
-       /* Try charges one by one with reclaim */
+       /* Try charges one by one with reclaim, but do not retry */
        while (count--) {
-               ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
+               ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
                if (ret)
                        return ret;
                mc.precharge++;
index e43142c15631fefdf5a605ced247a6429825252f..b8c11e063ff0746316fb792f4fe1dde0094cb828 100644 (file)
@@ -1033,36 +1033,39 @@ static void node_states_set_node(int node, struct memory_notify *arg)
        node_set_state(node, N_MEMORY);
 }
 
-int zone_can_shift(unsigned long pfn, unsigned long nr_pages,
-                  enum zone_type target)
+bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
+                  enum zone_type target, int *zone_shift)
 {
        struct zone *zone = page_zone(pfn_to_page(pfn));
        enum zone_type idx = zone_idx(zone);
        int i;
 
+       *zone_shift = 0;
+
        if (idx < target) {
                /* pages must be at end of current zone */
                if (pfn + nr_pages != zone_end_pfn(zone))
-                       return 0;
+                       return false;
 
                /* no zones in use between current zone and target */
                for (i = idx + 1; i < target; i++)
                        if (zone_is_initialized(zone - idx + i))
-                               return 0;
+                               return false;
        }
 
        if (target < idx) {
                /* pages must be at beginning of current zone */
                if (pfn != zone->zone_start_pfn)
-                       return 0;
+                       return false;
 
                /* no zones in use between current zone and target */
                for (i = target + 1; i < idx; i++)
                        if (zone_is_initialized(zone - idx + i))
-                               return 0;
+                               return false;
        }
 
-       return target - idx;
+       *zone_shift = target - idx;
+       return true;
 }
 
 /* Must be protected by mem_hotplug_begin() */
@@ -1089,10 +1092,13 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
            !can_online_high_movable(zone))
                return -EINVAL;
 
-       if (online_type == MMOP_ONLINE_KERNEL)
-               zone_shift = zone_can_shift(pfn, nr_pages, ZONE_NORMAL);
-       else if (online_type == MMOP_ONLINE_MOVABLE)
-               zone_shift = zone_can_shift(pfn, nr_pages, ZONE_MOVABLE);
+       if (online_type == MMOP_ONLINE_KERNEL) {
+               if (!zone_can_shift(pfn, nr_pages, ZONE_NORMAL, &zone_shift))
+                       return -EINVAL;
+       } else if (online_type == MMOP_ONLINE_MOVABLE) {
+               if (!zone_can_shift(pfn, nr_pages, ZONE_MOVABLE, &zone_shift))
+                       return -EINVAL;
+       }
 
        zone = move_pfn_range(zone_shift, pfn, pfn + nr_pages);
        if (!zone)
@@ -1477,17 +1483,20 @@ bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
 }
 
 /*
- * Confirm all pages in a range [start, end) is belongs to the same zone.
+ * Confirm all pages in a range [start, end) belong to the same zone.
+ * When true, return its valid [start, end).
  */
-int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
+int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
+                        unsigned long *valid_start, unsigned long *valid_end)
 {
        unsigned long pfn, sec_end_pfn;
+       unsigned long start, end;
        struct zone *zone = NULL;
        struct page *page;
        int i;
-       for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn);
+       for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
             pfn < end_pfn;
-            pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) {
+            pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
                /* Make sure the memory section is present first */
                if (!present_section_nr(pfn_to_section_nr(pfn)))
                        continue;
@@ -1503,10 +1512,20 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
                        page = pfn_to_page(pfn + i);
                        if (zone && page_zone(page) != zone)
                                return 0;
+                       if (!zone)
+                               start = pfn + i;
                        zone = page_zone(page);
+                       end = pfn + MAX_ORDER_NR_PAGES;
                }
        }
-       return 1;
+
+       if (zone) {
+               *valid_start = start;
+               *valid_end = end;
+               return 1;
+       } else {
+               return 0;
+       }
 }
 
 /*
@@ -1833,6 +1852,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
        long offlined_pages;
        int ret, drain, retry_max, node;
        unsigned long flags;
+       unsigned long valid_start, valid_end;
        struct zone *zone;
        struct memory_notify arg;
 
@@ -1843,10 +1863,10 @@ static int __ref __offline_pages(unsigned long start_pfn,
                return -EINVAL;
        /* This makes hotplug much easier...and readable.
           we assume this for now. .*/
-       if (!test_pages_in_a_zone(start_pfn, end_pfn))
+       if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
                return -EINVAL;
 
-       zone = page_zone(pfn_to_page(start_pfn));
+       zone = page_zone(pfn_to_page(valid_start));
        node = zone_to_nid(zone);
        nr_pages = end_pfn - start_pfn;
 
index 2e346645eb80d6bb8f97761c30aed6a512017e59..1e7873e40c9a16e922d4800e6dc41486eee23540 100644 (file)
@@ -2017,8 +2017,8 @@ retry_cpuset:
 
        nmask = policy_nodemask(gfp, pol);
        zl = policy_zonelist(gfp, pol, node);
-       mpol_cond_put(pol);
        page = __alloc_pages_nodemask(gfp, order, zl, nmask);
+       mpol_cond_put(pol);
 out:
        if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
                goto retry_cpuset;
index d604d2596b7bed41b9748ee3242571b771db4d5e..f3e0c69a97b76997d9fa65cda0b7e1b1fb8fa29a 100644 (file)
@@ -3523,12 +3523,13 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
        struct page *page = NULL;
        unsigned int alloc_flags;
        unsigned long did_some_progress;
-       enum compact_priority compact_priority = DEF_COMPACT_PRIORITY;
+       enum compact_priority compact_priority;
        enum compact_result compact_result;
-       int compaction_retries = 0;
-       int no_progress_loops = 0;
+       int compaction_retries;
+       int no_progress_loops;
        unsigned long alloc_start = jiffies;
        unsigned int stall_timeout = 10 * HZ;
+       unsigned int cpuset_mems_cookie;
 
        /*
         * In the slowpath, we sanity check order to avoid ever trying to
@@ -3549,6 +3550,23 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
                                (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
                gfp_mask &= ~__GFP_ATOMIC;
 
+retry_cpuset:
+       compaction_retries = 0;
+       no_progress_loops = 0;
+       compact_priority = DEF_COMPACT_PRIORITY;
+       cpuset_mems_cookie = read_mems_allowed_begin();
+       /*
+        * We need to recalculate the starting point for the zonelist iterator
+        * because we might have used different nodemask in the fast path, or
+        * there was a cpuset modification and we are retrying - otherwise we
+        * could end up iterating over non-eligible zones endlessly.
+        */
+       ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
+                                       ac->high_zoneidx, ac->nodemask);
+       if (!ac->preferred_zoneref->zone)
+               goto nopage;
+
+
        /*
         * The fast path uses conservative alloc_flags to succeed only until
         * kswapd needs to be woken up, and to avoid the cost of setting up
@@ -3708,6 +3726,13 @@ retry:
                                &compaction_retries))
                goto retry;
 
+       /*
+        * It's possible we raced with cpuset update so the OOM would be
+        * premature (see below the nopage: label for full explanation).
+        */
+       if (read_mems_allowed_retry(cpuset_mems_cookie))
+               goto retry_cpuset;
+
        /* Reclaim has failed us, start killing things */
        page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
        if (page)
@@ -3720,6 +3745,16 @@ retry:
        }
 
 nopage:
+       /*
+        * When updating a task's mems_allowed or mempolicy nodemask, it is
+        * possible to race with parallel threads in such a way that our
+        * allocation can fail while the mask is being updated. If we are about
+        * to fail, check if the cpuset changed during allocation and if so,
+        * retry.
+        */
+       if (read_mems_allowed_retry(cpuset_mems_cookie))
+               goto retry_cpuset;
+
        warn_alloc(gfp_mask,
                        "page allocation failure: order:%u", order);
 got_pg:
@@ -3734,7 +3769,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
                        struct zonelist *zonelist, nodemask_t *nodemask)
 {
        struct page *page;
-       unsigned int cpuset_mems_cookie;
        unsigned int alloc_flags = ALLOC_WMARK_LOW;
        gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
        struct alloc_context ac = {
@@ -3771,9 +3805,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
        if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
                alloc_flags |= ALLOC_CMA;
 
-retry_cpuset:
-       cpuset_mems_cookie = read_mems_allowed_begin();
-
        /* Dirty zone balancing only done in the fast path */
        ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
 
@@ -3784,8 +3815,13 @@ retry_cpuset:
         */
        ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
                                        ac.high_zoneidx, ac.nodemask);
-       if (!ac.preferred_zoneref) {
+       if (!ac.preferred_zoneref->zone) {
                page = NULL;
+               /*
+                * This might be due to race with cpuset_current_mems_allowed
+                * update, so make sure we retry with original nodemask in the
+                * slow path.
+                */
                goto no_zone;
        }
 
@@ -3794,6 +3830,7 @@ retry_cpuset:
        if (likely(page))
                goto out;
 
+no_zone:
        /*
         * Runtime PM, block IO and its error handling path can deadlock
         * because I/O on the device might not complete.
@@ -3805,21 +3842,10 @@ retry_cpuset:
         * Restore the original nodemask if it was potentially replaced with
         * &cpuset_current_mems_allowed to optimize the fast-path attempt.
         */
-       if (cpusets_enabled())
+       if (unlikely(ac.nodemask != nodemask))
                ac.nodemask = nodemask;
-       page = __alloc_pages_slowpath(alloc_mask, order, &ac);
 
-no_zone:
-       /*
-        * When updating a task's mems_allowed, it is possible to race with
-        * parallel threads in such a way that an allocation can fail while
-        * the mask is being updated. If a page allocation is about to fail,
-        * check if the cpuset changed during allocation and if so, retry.
-        */
-       if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) {
-               alloc_mask = gfp_mask;
-               goto retry_cpuset;
-       }
+       page = __alloc_pages_slowpath(alloc_mask, order, &ac);
 
 out:
        if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
@@ -7248,6 +7274,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
                .zone = page_zone(pfn_to_page(start)),
                .mode = MIGRATE_SYNC,
                .ignore_skip_hint = true,
+               .gfp_mask = GFP_KERNEL,
        };
        INIT_LIST_HEAD(&cc.migratepages);
 
index bb53285a1d99666676e85697330f1a052f7c3cc0..3a7587a0314dc73fb4929a824a74f9b8948ea502 100644 (file)
@@ -415,6 +415,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
                struct shrink_control *sc, unsigned long nr_to_split)
 {
        LIST_HEAD(list), *pos, *next;
+       LIST_HEAD(to_remove);
        struct inode *inode;
        struct shmem_inode_info *info;
        struct page *page;
@@ -441,9 +442,8 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
                /* Check if there's anything to gain */
                if (round_up(inode->i_size, PAGE_SIZE) ==
                                round_up(inode->i_size, HPAGE_PMD_SIZE)) {
-                       list_del_init(&info->shrinklist);
+                       list_move(&info->shrinklist, &to_remove);
                        removed++;
-                       iput(inode);
                        goto next;
                }
 
@@ -454,6 +454,13 @@ next:
        }
        spin_unlock(&sbinfo->shrinklist_lock);
 
+       list_for_each_safe(pos, next, &to_remove) {
+               info = list_entry(pos, struct shmem_inode_info, shrinklist);
+               inode = &info->vfs_inode;
+               list_del_init(&info->shrinklist);
+               iput(inode);
+       }
+
        list_for_each_safe(pos, next, &list) {
                int ret;
 
index 067598a008493fabb68d48120a904943fff4e08c..7ec0a965c6a3a366550b2500f9880f65e5025138 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -496,10 +496,11 @@ static inline int check_valid_pointer(struct kmem_cache *s,
        return 1;
 }
 
-static void print_section(char *text, u8 *addr, unsigned int length)
+static void print_section(char *level, char *text, u8 *addr,
+                         unsigned int length)
 {
        metadata_access_enable();
-       print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
+       print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
                        length, 1);
        metadata_access_disable();
 }
@@ -636,14 +637,15 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
               p, p - addr, get_freepointer(s, p));
 
        if (s->flags & SLAB_RED_ZONE)
-               print_section("Redzone ", p - s->red_left_pad, s->red_left_pad);
+               print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
+                             s->red_left_pad);
        else if (p > addr + 16)
-               print_section("Bytes b4 ", p - 16, 16);
+               print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
 
-       print_section("Object ", p, min_t(unsigned long, s->object_size,
-                               PAGE_SIZE));
+       print_section(KERN_ERR, "Object ", p,
+                     min_t(unsigned long, s->object_size, PAGE_SIZE));
        if (s->flags & SLAB_RED_ZONE)
-               print_section("Redzone ", p + s->object_size,
+               print_section(KERN_ERR, "Redzone ", p + s->object_size,
                        s->inuse - s->object_size);
 
        if (s->offset)
@@ -658,7 +660,8 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 
        if (off != size_from_object(s))
                /* Beginning of the filler is the free pointer */
-               print_section("Padding ", p + off, size_from_object(s) - off);
+               print_section(KERN_ERR, "Padding ", p + off,
+                             size_from_object(s) - off);
 
        dump_stack();
 }
@@ -820,7 +823,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
                end--;
 
        slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
-       print_section("Padding ", end - remainder, remainder);
+       print_section(KERN_ERR, "Padding ", end - remainder, remainder);
 
        restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
        return 0;
@@ -973,7 +976,7 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
                        page->freelist);
 
                if (!alloc)
-                       print_section("Object ", (void *)object,
+                       print_section(KERN_INFO, "Object ", (void *)object,
                                        s->object_size);
 
                dump_stack();
@@ -1419,6 +1422,10 @@ static int init_cache_random_seq(struct kmem_cache *s)
        int err;
        unsigned long i, count = oo_objects(s->oo);
 
+       /* Bailout if already initialised */
+       if (s->random_seq)
+               return 0;
+
        err = cache_random_seq_create(s, count, GFP_KERNEL);
        if (err) {
                pr_err("SLUB: Unable to initialize free list for %s\n",
index 067a0d62f31841d16913d36e38531a277ab59b01..cabf09e0128beebdee2b8a959361fe6464fb3469 100644 (file)
@@ -78,7 +78,13 @@ static u64 zswap_duplicate_entry;
 
 /* Enable/disable zswap (disabled by default) */
 static bool zswap_enabled;
-module_param_named(enabled, zswap_enabled, bool, 0644);
+static int zswap_enabled_param_set(const char *,
+                                  const struct kernel_param *);
+static struct kernel_param_ops zswap_enabled_param_ops = {
+       .set =          zswap_enabled_param_set,
+       .get =          param_get_bool,
+};
+module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
 
 /* Crypto compressor to use */
 #define ZSWAP_COMPRESSOR_DEFAULT "lzo"
@@ -176,6 +182,9 @@ static atomic_t zswap_pools_count = ATOMIC_INIT(0);
 /* used by param callback function */
 static bool zswap_init_started;
 
+/* fatal error during init */
+static bool zswap_init_failed;
+
 /*********************************
 * helpers and fwd declarations
 **********************************/
@@ -624,6 +633,11 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
        char *s = strstrip((char *)val);
        int ret;
 
+       if (zswap_init_failed) {
+               pr_err("can't set param, initialization failed\n");
+               return -ENODEV;
+       }
+
        /* no change required */
        if (!strcmp(s, *(char **)kp->arg))
                return 0;
@@ -703,6 +717,17 @@ static int zswap_zpool_param_set(const char *val,
        return __zswap_param_set(val, kp, NULL, zswap_compressor);
 }
 
+static int zswap_enabled_param_set(const char *val,
+                                  const struct kernel_param *kp)
+{
+       if (zswap_init_failed) {
+               pr_err("can't enable, initialization failed\n");
+               return -ENODEV;
+       }
+
+       return param_set_bool(val, kp);
+}
+
 /*********************************
 * writeback code
 **********************************/
@@ -1201,6 +1226,9 @@ hp_fail:
 dstmem_fail:
        zswap_entry_cache_destroy();
 cache_fail:
+       /* if built-in, we aren't unloaded on failure; don't allow use */
+       zswap_init_failed = true;
+       zswap_enabled = false;
        return -ENOMEM;
 }
 /* must be late so crypto has time to come up */
index 9c561e683f4b8b68642b626b51a0dcda30260e97..0854ebd8613e9bf9044b04099b11341325d6e194 100644 (file)
@@ -474,7 +474,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if) {
                ret = -EINVAL;
-               goto put_primary_if;
+               goto free_skb;
        }
 
        /* Create one header to be copied to all fragments */
@@ -502,7 +502,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
                skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
                if (!skb_fragment) {
                        ret = -ENOMEM;
-                       goto free_skb;
+                       goto put_primary_if;
                }
 
                batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
@@ -511,7 +511,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
                ret = batadv_send_unicast_skb(skb_fragment, neigh_node);
                if (ret != NET_XMIT_SUCCESS) {
                        ret = NET_XMIT_DROP;
-                       goto free_skb;
+                       goto put_primary_if;
                }
 
                frag_header.no++;
@@ -519,7 +519,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
                /* The initial check in this function should cover this case */
                if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) {
                        ret = -EINVAL;
-                       goto free_skb;
+                       goto put_primary_if;
                }
        }
 
@@ -527,7 +527,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
        if (batadv_skb_head_push(skb, header_size) < 0 ||
            pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0) {
                ret = -ENOMEM;
-               goto free_skb;
+               goto put_primary_if;
        }
 
        memcpy(skb->data, &frag_header, header_size);
index 1904a93f47d50a2bd1c2c1651f48b1ab762ece3c..d491529332f4568b532759331bd8f88653573ba0 100644 (file)
@@ -920,7 +920,7 @@ static void chan_close_cb(struct l2cap_chan *chan)
                        BT_DBG("dev %p removing %speer %p", dev,
                               last ? "last " : "1 ", peer);
                        BT_DBG("chan %p orig refcnt %d", chan,
-                              atomic_read(&chan->kref.refcount));
+                              kref_read(&chan->kref));
 
                        l2cap_chan_put(chan);
                        break;
index 5f123c3320a7be1f355d31f1023d3e3996853339..f0095fd79818de116164a226e7d9feb27abfa5b4 100644 (file)
@@ -810,7 +810,7 @@ static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn, bool locked)
 /* AMP Manager functions */
 struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr)
 {
-       BT_DBG("mgr %p orig refcnt %d", mgr, atomic_read(&mgr->kref.refcount));
+       BT_DBG("mgr %p orig refcnt %d", mgr, kref_read(&mgr->kref));
 
        kref_get(&mgr->kref);
 
@@ -833,7 +833,7 @@ static void amp_mgr_destroy(struct kref *kref)
 
 int amp_mgr_put(struct amp_mgr *mgr)
 {
-       BT_DBG("mgr %p orig refcnt %d", mgr, atomic_read(&mgr->kref.refcount));
+       BT_DBG("mgr %p orig refcnt %d", mgr, kref_read(&mgr->kref));
 
        return kref_put(&mgr->kref, &amp_mgr_destroy);
 }
index e32f34189007967e7674a501e2c944029623cfab..02a4ccc04e1ebba5301f8d2bfec55229d428ffd8 100644 (file)
@@ -24,7 +24,7 @@
 void amp_ctrl_get(struct amp_ctrl *ctrl)
 {
        BT_DBG("ctrl %p orig refcnt %d", ctrl,
-              atomic_read(&ctrl->kref.refcount));
+              kref_read(&ctrl->kref));
 
        kref_get(&ctrl->kref);
 }
@@ -42,7 +42,7 @@ static void amp_ctrl_destroy(struct kref *kref)
 int amp_ctrl_put(struct amp_ctrl *ctrl)
 {
        BT_DBG("ctrl %p orig refcnt %d", ctrl,
-              atomic_read(&ctrl->kref.refcount));
+              kref_read(&ctrl->kref));
 
        return kref_put(&ctrl->kref, &amp_ctrl_destroy);
 }
index ce0b5dd01953694ffdcff246b8c3ab47ee689c03..fc7f321a382369f0d5097ce785921b1298a237f0 100644 (file)
@@ -481,14 +481,14 @@ static void l2cap_chan_destroy(struct kref *kref)
 
 void l2cap_chan_hold(struct l2cap_chan *c)
 {
-       BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
+       BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
 
        kref_get(&c->kref);
 }
 
 void l2cap_chan_put(struct l2cap_chan *c)
 {
-       BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
+       BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
 
        kref_put(&c->kref, l2cap_chan_destroy);
 }
index 71c7453268c1cf5a34949b0451f457ac18a4657b..7109b389ea585ab4b3a52bac803ae1fd860c2f65 100644 (file)
@@ -781,20 +781,6 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
        return 0;
 }
 
-static int br_dev_newlink(struct net *src_net, struct net_device *dev,
-                         struct nlattr *tb[], struct nlattr *data[])
-{
-       struct net_bridge *br = netdev_priv(dev);
-
-       if (tb[IFLA_ADDRESS]) {
-               spin_lock_bh(&br->lock);
-               br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
-               spin_unlock_bh(&br->lock);
-       }
-
-       return register_netdevice(dev);
-}
-
 static int br_port_slave_changelink(struct net_device *brdev,
                                    struct net_device *dev,
                                    struct nlattr *tb[],
@@ -1115,6 +1101,25 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
        return 0;
 }
 
+static int br_dev_newlink(struct net *src_net, struct net_device *dev,
+                         struct nlattr *tb[], struct nlattr *data[])
+{
+       struct net_bridge *br = netdev_priv(dev);
+       int err;
+
+       if (tb[IFLA_ADDRESS]) {
+               spin_lock_bh(&br->lock);
+               br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
+               spin_unlock_bh(&br->lock);
+       }
+
+       err = br_changelink(dev, tb, data);
+       if (err)
+               return err;
+
+       return register_netdevice(dev);
+}
+
 static size_t br_get_size(const struct net_device *brdev)
 {
        return nla_total_size(sizeof(u32)) +    /* IFLA_BR_FORWARD_DELAY  */
index 1108079d934f8383a599d7997b08100fca0465e9..5488e4a6ccd062e6f6e7e2b841dde5ef055d4337 100644 (file)
@@ -445,6 +445,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
  * @func: callback function on filter match
  * @data: returned parameter for callback function
  * @ident: string for calling module identification
+ * @sk: socket pointer (might be NULL)
  *
  * Description:
  *  Invokes the callback function with the received sk_buff and the given
@@ -468,7 +469,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
  */
 int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
                    void (*func)(struct sk_buff *, void *), void *data,
-                   char *ident)
+                   char *ident, struct sock *sk)
 {
        struct receiver *r;
        struct hlist_head *rl;
@@ -496,6 +497,7 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
                r->func    = func;
                r->data    = data;
                r->ident   = ident;
+               r->sk      = sk;
 
                hlist_add_head_rcu(&r->list, rl);
                d->entries++;
@@ -520,8 +522,11 @@ EXPORT_SYMBOL(can_rx_register);
 static void can_rx_delete_receiver(struct rcu_head *rp)
 {
        struct receiver *r = container_of(rp, struct receiver, rcu);
+       struct sock *sk = r->sk;
 
        kmem_cache_free(rcv_cache, r);
+       if (sk)
+               sock_put(sk);
 }
 
 /**
@@ -596,8 +601,11 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
        spin_unlock(&can_rcvlists_lock);
 
        /* schedule the receiver item for deletion */
-       if (r)
+       if (r) {
+               if (r->sk)
+                       sock_hold(r->sk);
                call_rcu(&r->rcu, can_rx_delete_receiver);
+       }
 }
 EXPORT_SYMBOL(can_rx_unregister);
 
index fca0fe9fc45a497cdf3da82d5414e846e7cc61b7..b86f5129e8385fe84ef671bb914e8e05c2977ca0 100644 (file)
 
 struct receiver {
        struct hlist_node list;
-       struct rcu_head rcu;
        canid_t can_id;
        canid_t mask;
        unsigned long matches;
        void (*func)(struct sk_buff *, void *);
        void *data;
        char *ident;
+       struct sock *sk;
+       struct rcu_head rcu;
 };
 
 #define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
index 21ac75390e3d64f795faad074b515d34ce0bbfa3..95d13b233c65161cf3595a8b0036207f5c2892e3 100644 (file)
@@ -734,14 +734,23 @@ static struct bcm_op *bcm_find_op(struct list_head *ops,
 
 static void bcm_remove_op(struct bcm_op *op)
 {
-       hrtimer_cancel(&op->timer);
-       hrtimer_cancel(&op->thrtimer);
-
-       if (op->tsklet.func)
-               tasklet_kill(&op->tsklet);
+       if (op->tsklet.func) {
+               while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) ||
+                      test_bit(TASKLET_STATE_RUN, &op->tsklet.state) ||
+                      hrtimer_active(&op->timer)) {
+                       hrtimer_cancel(&op->timer);
+                       tasklet_kill(&op->tsklet);
+               }
+       }
 
-       if (op->thrtsklet.func)
-               tasklet_kill(&op->thrtsklet);
+       if (op->thrtsklet.func) {
+               while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) ||
+                      test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) ||
+                      hrtimer_active(&op->thrtimer)) {
+                       hrtimer_cancel(&op->thrtimer);
+                       tasklet_kill(&op->thrtsklet);
+               }
+       }
 
        if ((op->frames) && (op->frames != &op->sframe))
                kfree(op->frames);
@@ -1216,7 +1225,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
                                err = can_rx_register(dev, op->can_id,
                                                      REGMASK(op->can_id),
                                                      bcm_rx_handler, op,
-                                                     "bcm");
+                                                     "bcm", sk);
 
                                op->rx_reg_dev = dev;
                                dev_put(dev);
@@ -1225,7 +1234,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
                } else
                        err = can_rx_register(NULL, op->can_id,
                                              REGMASK(op->can_id),
-                                             bcm_rx_handler, op, "bcm");
+                                             bcm_rx_handler, op, "bcm", sk);
                if (err) {
                        /* this bcm rx op is broken -> remove it */
                        list_del(&op->list);
index a54ab0c821048ab2034bf32cef3c1f35e0dc82a5..7056a1a2bb70098e691ce557f05e5bc1f27cb42f 100644 (file)
@@ -442,7 +442,7 @@ static inline int cgw_register_filter(struct cgw_job *gwj)
 {
        return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id,
                               gwj->ccgw.filter.can_mask, can_can_gw_rcv,
-                              gwj, "gw");
+                              gwj, "gw", NULL);
 }
 
 static inline void cgw_unregister_filter(struct cgw_job *gwj)
index b075f028d7e23958e9433a4b19f4475ad930b547..6dc546a06673ff41fc121c546ebd0567bb0da05f 100644 (file)
@@ -190,7 +190,7 @@ static int raw_enable_filters(struct net_device *dev, struct sock *sk,
        for (i = 0; i < count; i++) {
                err = can_rx_register(dev, filter[i].can_id,
                                      filter[i].can_mask,
-                                     raw_rcv, sk, "raw");
+                                     raw_rcv, sk, "raw", sk);
                if (err) {
                        /* clean up successfully registered filters */
                        while (--i >= 0)
@@ -211,7 +211,7 @@ static int raw_enable_errfilter(struct net_device *dev, struct sock *sk,
 
        if (err_mask)
                err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG,
-                                     raw_rcv, sk, "raw");
+                                     raw_rcv, sk, "raw", sk);
 
        return err;
 }
index 770c52701efa3e08cec85c756fd7b8125f63ac91..bad3d4ae43f6e929e9af9616bd17e755ef6f9f69 100644 (file)
@@ -3425,7 +3425,7 @@ static void ceph_msg_release(struct kref *kref)
 struct ceph_msg *ceph_msg_get(struct ceph_msg *msg)
 {
        dout("%s %p (was %d)\n", __func__, msg,
-            atomic_read(&msg->kref.refcount));
+            kref_read(&msg->kref));
        kref_get(&msg->kref);
        return msg;
 }
@@ -3434,7 +3434,7 @@ EXPORT_SYMBOL(ceph_msg_get);
 void ceph_msg_put(struct ceph_msg *msg)
 {
        dout("%s %p (was %d)\n", __func__, msg,
-            atomic_read(&msg->kref.refcount));
+            kref_read(&msg->kref));
        kref_put(&msg->kref, ceph_msg_release);
 }
 EXPORT_SYMBOL(ceph_msg_put);
index 842f049abb86d9233f23581ea1a0cc003a0d7ecb..f3378ba1a82893024b9012c5421099bce87f1824 100644 (file)
@@ -438,7 +438,7 @@ static void ceph_osdc_release_request(struct kref *kref)
 void ceph_osdc_get_request(struct ceph_osd_request *req)
 {
        dout("%s %p (was %d)\n", __func__, req,
-            atomic_read(&req->r_kref.refcount));
+            kref_read(&req->r_kref));
        kref_get(&req->r_kref);
 }
 EXPORT_SYMBOL(ceph_osdc_get_request);
@@ -447,7 +447,7 @@ void ceph_osdc_put_request(struct ceph_osd_request *req)
 {
        if (req) {
                dout("%s %p (was %d)\n", __func__, req,
-                    atomic_read(&req->r_kref.refcount));
+                    kref_read(&req->r_kref));
                kref_put(&req->r_kref, ceph_osdc_release_request);
        }
 }
@@ -487,11 +487,11 @@ static void request_reinit(struct ceph_osd_request *req)
        struct ceph_msg *reply_msg = req->r_reply;
 
        dout("%s req %p\n", __func__, req);
-       WARN_ON(atomic_read(&req->r_kref.refcount) != 1);
+       WARN_ON(kref_read(&req->r_kref) != 1);
        request_release_checks(req);
 
-       WARN_ON(atomic_read(&request_msg->kref.refcount) != 1);
-       WARN_ON(atomic_read(&reply_msg->kref.refcount) != 1);
+       WARN_ON(kref_read(&request_msg->kref) != 1);
+       WARN_ON(kref_read(&reply_msg->kref) != 1);
        target_destroy(&req->r_t);
 
        request_init(req);
index 662bea5871656f190a61e35b3c5cd21c2f132441..ea633342ab0d046cbc49e55b679440ef9e015c2d 100644 (file)
@@ -332,7 +332,9 @@ void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len)
 EXPORT_SYMBOL(__skb_free_datagram_locked);
 
 int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
-                       unsigned int flags)
+                       unsigned int flags,
+                       void (*destructor)(struct sock *sk,
+                                          struct sk_buff *skb))
 {
        int err = 0;
 
@@ -342,6 +344,8 @@ int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
                if (skb == skb_peek(&sk->sk_receive_queue)) {
                        __skb_unlink(skb, &sk->sk_receive_queue);
                        atomic_dec(&skb->users);
+                       if (destructor)
+                               destructor(sk, skb);
                        err = 0;
                }
                spin_unlock_bh(&sk->sk_receive_queue.lock);
@@ -375,7 +379,7 @@ EXPORT_SYMBOL(__sk_queue_drop_skb);
 
 int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
 {
-       int err = __sk_queue_drop_skb(sk, skb, flags);
+       int err = __sk_queue_drop_skb(sk, skb, flags, NULL);
 
        kfree_skb(skb);
        sk_mem_reclaim_partial(sk);
index 07b307b0b414730688b64fdb2295b0fa1b721e51..29101c98399f40b6b8e42c31a255d8f1fb6bd7a1 100644 (file)
@@ -1695,24 +1695,19 @@ EXPORT_SYMBOL_GPL(net_dec_egress_queue);
 
 static struct static_key netstamp_needed __read_mostly;
 #ifdef HAVE_JUMP_LABEL
-/* We are not allowed to call static_key_slow_dec() from irq context
- * If net_disable_timestamp() is called from irq context, defer the
- * static_key_slow_dec() calls.
- */
 static atomic_t netstamp_needed_deferred;
-#endif
-
-void net_enable_timestamp(void)
+static void netstamp_clear(struct work_struct *work)
 {
-#ifdef HAVE_JUMP_LABEL
        int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
 
-       if (deferred) {
-               while (--deferred)
-                       static_key_slow_dec(&netstamp_needed);
-               return;
-       }
+       while (deferred--)
+               static_key_slow_dec(&netstamp_needed);
+}
+static DECLARE_WORK(netstamp_work, netstamp_clear);
 #endif
+
+void net_enable_timestamp(void)
+{
        static_key_slow_inc(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_enable_timestamp);
@@ -1720,12 +1715,12 @@ EXPORT_SYMBOL(net_enable_timestamp);
 void net_disable_timestamp(void)
 {
 #ifdef HAVE_JUMP_LABEL
-       if (in_interrupt()) {
-               atomic_inc(&netstamp_needed_deferred);
-               return;
-       }
-#endif
+       /* net_disable_timestamp() can be called from non process context */
+       atomic_inc(&netstamp_needed_deferred);
+       schedule_work(&netstamp_work);
+#else
        static_key_slow_dec(&netstamp_needed);
+#endif
 }
 EXPORT_SYMBOL(net_disable_timestamp);
 
@@ -2795,9 +2790,9 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
        if (skb->ip_summed != CHECKSUM_NONE &&
            !can_checksum_protocol(features, type)) {
                features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
-       } else if (illegal_highdma(skb->dev, skb)) {
-               features &= ~NETIF_F_SG;
        }
+       if (illegal_highdma(skb->dev, skb))
+               features &= ~NETIF_F_SG;
 
        return features;
 }
index e23766c7e3ba19414494d242af86c1029e8eee61..d92de0a1f0a49d51ec8329c65d46a4f2ae304ebd 100644 (file)
@@ -1405,9 +1405,12 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
        if (regs.len > reglen)
                regs.len = reglen;
 
-       regbuf = vzalloc(reglen);
-       if (reglen && !regbuf)
-               return -ENOMEM;
+       regbuf = NULL;
+       if (reglen) {
+               regbuf = vzalloc(reglen);
+               if (!regbuf)
+                       return -ENOMEM;
+       }
 
        ops->get_regs(dev, &regs, regbuf);
 
@@ -1712,7 +1715,7 @@ static noinline_for_stack int ethtool_get_channels(struct net_device *dev,
 static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
                                                   void __user *useraddr)
 {
-       struct ethtool_channels channels, max;
+       struct ethtool_channels channels, max = { .cmd = ETHTOOL_GCHANNELS };
        u32 max_rx_in_use = 0;
 
        if (!dev->ethtool_ops->set_channels || !dev->ethtool_ops->get_channels)
index 71bb3e2eca080735a771a4c4eaf1a3b74b48da9e..b3eef90b2df9d05b62750d3d6fafd7b096f92bd9 100644 (file)
@@ -386,6 +386,7 @@ static const struct lwtunnel_encap_ops bpf_encap_ops = {
        .fill_encap     = bpf_fill_encap_info,
        .get_encap_size = bpf_encap_nlsize,
        .cmp_encap      = bpf_encap_cmp,
+       .owner          = THIS_MODULE,
 };
 
 static int __init bpf_lwt_init(void)
index a5d4e866ce88b4d055798d9ea55fc905b351fb3d..c23465005f2f4ced93d7bcb2754fb267c2cf00d0 100644 (file)
@@ -26,6 +26,7 @@
 #include <net/lwtunnel.h>
 #include <net/rtnetlink.h>
 #include <net/ip6_fib.h>
+#include <net/nexthop.h>
 
 #ifdef CONFIG_MODULES
 
@@ -114,25 +115,77 @@ int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
        ret = -EOPNOTSUPP;
        rcu_read_lock();
        ops = rcu_dereference(lwtun_encaps[encap_type]);
+       if (likely(ops && ops->build_state && try_module_get(ops->owner))) {
+               ret = ops->build_state(dev, encap, family, cfg, lws);
+               if (ret)
+                       module_put(ops->owner);
+       }
+       rcu_read_unlock();
+
+       return ret;
+}
+EXPORT_SYMBOL(lwtunnel_build_state);
+
+int lwtunnel_valid_encap_type(u16 encap_type)
+{
+       const struct lwtunnel_encap_ops *ops;
+       int ret = -EINVAL;
+
+       if (encap_type == LWTUNNEL_ENCAP_NONE ||
+           encap_type > LWTUNNEL_ENCAP_MAX)
+               return ret;
+
+       rcu_read_lock();
+       ops = rcu_dereference(lwtun_encaps[encap_type]);
+       rcu_read_unlock();
 #ifdef CONFIG_MODULES
        if (!ops) {
                const char *encap_type_str = lwtunnel_encap_str(encap_type);
 
                if (encap_type_str) {
-                       rcu_read_unlock();
+                       __rtnl_unlock();
                        request_module("rtnl-lwt-%s", encap_type_str);
+                       rtnl_lock();
+
                        rcu_read_lock();
                        ops = rcu_dereference(lwtun_encaps[encap_type]);
+                       rcu_read_unlock();
                }
        }
 #endif
-       if (likely(ops && ops->build_state))
-               ret = ops->build_state(dev, encap, family, cfg, lws);
-       rcu_read_unlock();
+       return ops ? 0 : -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(lwtunnel_valid_encap_type);
 
-       return ret;
+int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining)
+{
+       struct rtnexthop *rtnh = (struct rtnexthop *)attr;
+       struct nlattr *nla_entype;
+       struct nlattr *attrs;
+       struct nlattr *nla;
+       u16 encap_type;
+       int attrlen;
+
+       while (rtnh_ok(rtnh, remaining)) {
+               attrlen = rtnh_attrlen(rtnh);
+               if (attrlen > 0) {
+                       attrs = rtnh_attrs(rtnh);
+                       nla = nla_find(attrs, attrlen, RTA_ENCAP);
+                       nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
+
+                       if (nla_entype) {
+                               encap_type = nla_get_u16(nla_entype);
+
+                               if (lwtunnel_valid_encap_type(encap_type) != 0)
+                                       return -EOPNOTSUPP;
+                       }
+               }
+               rtnh = rtnh_next(rtnh, &remaining);
+       }
+
+       return 0;
 }
-EXPORT_SYMBOL(lwtunnel_build_state);
+EXPORT_SYMBOL(lwtunnel_valid_encap_type_attr);
 
 void lwtstate_free(struct lwtunnel_state *lws)
 {
@@ -144,6 +197,7 @@ void lwtstate_free(struct lwtunnel_state *lws)
        } else {
                kfree(lws);
        }
+       module_put(ops->owner);
 }
 EXPORT_SYMBOL(lwtstate_free);
 
index 7bb12e07ffef4273e156893cc150ea5c9d79e8bd..e7c12caa20c88acc9a5dd86f07d11644fb58341d 100644 (file)
@@ -2923,7 +2923,8 @@ static void neigh_proc_update(struct ctl_table *ctl, int write)
                return;
 
        set_bit(index, p->data_state);
-       call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
+       if (index == NEIGH_VAR_DELAY_PROBE_TIME)
+               call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
        if (!dev) /* NULL dev means this is default value */
                neigh_copy_dflt_parms(net, p, index);
 }
index ba347184bda9b3fee1f86db847352ab133ddaa3f..8fedc2d497709b3dea9202894f45bf5cab043361 100644 (file)
@@ -606,7 +606,8 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                        if (inet_csk(sk)->icsk_af_ops->conn_request(sk,
                                                                    skb) < 0)
                                return 1;
-                       goto discard;
+                       consume_skb(skb);
+                       return 0;
                }
                if (dh->dccph_type == DCCP_PKT_RESET)
                        goto discard;
index adfc790f71935913801f90d3ab46b7c133af4ec3..c4e879c021868719d928f49e2ef2f5c5d1876534 100644 (file)
@@ -227,7 +227,7 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
                opt = ireq->ipv6_opt;
                if (!opt)
                        opt = rcu_dereference(np->opt);
-               err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
+               err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, np->tclass);
                rcu_read_unlock();
                err = net_xmit_eval(err);
        }
@@ -281,7 +281,7 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
        dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
        if (!IS_ERR(dst)) {
                skb_dst_set(skb, dst);
-               ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
+               ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0);
                DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
                DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
                return;
index da38621245458bae2506b0c030d92315f1be5775..0f99297b2fb3517942bf74fee08ed3e61d65a4f0 100644 (file)
@@ -273,6 +273,7 @@ static int dsa_user_port_apply(struct device_node *port, u32 index,
        if (err) {
                dev_warn(ds->dev, "Failed to create slave %d: %d\n",
                         index, err);
+               ds->ports[index].netdev = NULL;
                return err;
        }
 
index 68c9eea0051872eb151ee7d05f92dcc4638e3db6..7d45961108511488003380da656bf8a4f2fd7d84 100644 (file)
@@ -1105,10 +1105,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
        /* Use already configured phy mode */
        if (p->phy_interface == PHY_INTERFACE_MODE_NA)
                p->phy_interface = p->phy->interface;
-       phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
-                          p->phy_interface);
-
-       return 0;
+       return phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
+                                 p->phy_interface);
 }
 
 static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
@@ -1203,6 +1201,8 @@ int dsa_slave_suspend(struct net_device *slave_dev)
 {
        struct dsa_slave_priv *p = netdev_priv(slave_dev);
 
+       netif_device_detach(slave_dev);
+
        if (p->phy) {
                phy_stop(p->phy);
                p->old_pause = -1;
index 8c5a479681ca9ed8c2686208fc570e819a4db31a..516c87e75de7009e9e4f0cbdc80f8160c318fdf4 100644 (file)
@@ -356,6 +356,7 @@ void ether_setup(struct net_device *dev)
        dev->header_ops         = &eth_header_ops;
        dev->type               = ARPHRD_ETHER;
        dev->hard_header_len    = ETH_HLEN;
+       dev->min_header_len     = ETH_HLEN;
        dev->mtu                = ETH_DATA_LEN;
        dev->min_mtu            = ETH_MIN_MTU;
        dev->max_mtu            = ETH_DATA_LEN;
index 89a8cac4726a5e354371bb0c76a7e60cd2d7026e..51b27ae09fbd725bcd8030982e5850215ac4ce5c 100644 (file)
@@ -1263,7 +1263,7 @@ void __init arp_init(void)
 /*
  *     ax25 -> ASCII conversion
  */
-static char *ax2asc2(ax25_address *a, char *buf)
+static void ax2asc2(ax25_address *a, char *buf)
 {
        char c, *s;
        int n;
@@ -1285,10 +1285,10 @@ static char *ax2asc2(ax25_address *a, char *buf)
        *s++ = n + '0';
        *s++ = '\0';
 
-       if (*buf == '\0' || *buf == '-')
-               return "*";
-
-       return buf;
+       if (*buf == '\0' || *buf == '-') {
+               buf[0] = '*';
+               buf[1] = '\0';
+       }
 }
 #endif /* CONFIG_AX25 */
 
@@ -1322,7 +1322,7 @@ static void arp_format_neigh_entry(struct seq_file *seq,
        }
 #endif
        sprintf(tbuf, "%pI4", n->primary_key);
-       seq_printf(seq, "%-16s 0x%-10x0x%-10x%s     *        %s\n",
+       seq_printf(seq, "%-16s 0x%-10x0x%-10x%-17s     *        %s\n",
                   tbuf, hatype, arp_state_to_flags(n), hbuffer, dev->name);
        read_unlock(&n->lock);
 }
index 72d6f056d863603c959e1d04b9f863909a37c758..ae206163c273381ba6e8bd8a24fa050619a4a6ae 100644 (file)
@@ -1587,6 +1587,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
                                goto validate_return_locked;
                        }
 
+               if (opt_iter + 1 == opt_len) {
+                       err_offset = opt_iter;
+                       goto validate_return_locked;
+               }
                tag_len = tag[1];
                if (tag_len > (opt_len - opt_iter)) {
                        err_offset = opt_iter + 1;
index eae0332b0e8c1f861ce629ed9ce3ddc45802a6b8..7db2ad2e82d3193ff1748bf393f536ba3a5a3eb9 100644 (file)
@@ -46,6 +46,7 @@
 #include <net/rtnetlink.h>
 #include <net/xfrm.h>
 #include <net/l3mdev.h>
+#include <net/lwtunnel.h>
 #include <trace/events/fib.h>
 
 #ifndef CONFIG_IP_MULTIPLE_TABLES
@@ -677,6 +678,10 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
                        cfg->fc_mx_len = nla_len(attr);
                        break;
                case RTA_MULTIPATH:
+                       err = lwtunnel_valid_encap_type_attr(nla_data(attr),
+                                                            nla_len(attr));
+                       if (err < 0)
+                               goto errout;
                        cfg->fc_mp = nla_data(attr);
                        cfg->fc_mp_len = nla_len(attr);
                        break;
@@ -691,6 +696,9 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
                        break;
                case RTA_ENCAP_TYPE:
                        cfg->fc_encap_type = nla_get_u16(attr);
+                       err = lwtunnel_valid_encap_type(cfg->fc_encap_type);
+                       if (err < 0)
+                               goto errout;
                        break;
                }
        }
index 5b15459955f84cfc26dd2b12f129b1ee4014e62b..44fd86de2823dd17de16276a8ec01b190e69b8b4 100644 (file)
@@ -1172,6 +1172,7 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
                                psf->sf_crcount = im->crcount;
                }
                in_dev_put(pmc->interface);
+               kfree(pmc);
        }
        spin_unlock_bh(&im->lock);
 }
index fac275c4810865a5b9b9ca1ac9fc826b8482aa9f..b67719f459537d49d958de9874414ea868c4a8e1 100644 (file)
@@ -1629,6 +1629,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
        sk->sk_protocol = ip_hdr(skb)->protocol;
        sk->sk_bound_dev_if = arg->bound_dev_if;
        sk->sk_sndbuf = sysctl_wmem_default;
+       sk->sk_mark = fl4.flowi4_mark;
        err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
                             len, 0, &ipc, &rt, MSG_DONTWAIT);
        if (unlikely(err)) {
index 53ae0c6315ad03e46f93ae68cb930fff5848edcd..900011709e3b8e4807daaa6bf537c3871a7d9306 100644 (file)
@@ -1238,7 +1238,14 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
                pktinfo->ipi_ifindex = 0;
                pktinfo->ipi_spec_dst.s_addr = 0;
        }
-       skb_dst_drop(skb);
+       /* We need to keep the dst for __ip_options_echo()
+        * We could restrict the test to opt.ts_needtime || opt.srr,
+        * but the following is good enough as IP options are not often used.
+        */
+       if (unlikely(IPCB(skb)->opt.optlen))
+               skb_dst_force(skb);
+       else
+               skb_dst_drop(skb);
 }
 
 int ip_setsockopt(struct sock *sk, int level,
index fed3d29f9eb3b716664b8d9eba052695cbb867bd..0fd1976ab63bbd9be357e41b9acd71dcfa507665 100644 (file)
@@ -313,6 +313,7 @@ static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
        .fill_encap = ip_tun_fill_encap_info,
        .get_encap_size = ip_tun_encap_nlsize,
        .cmp_encap = ip_tun_cmp_encap,
+       .owner = THIS_MODULE,
 };
 
 static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {
@@ -403,6 +404,7 @@ static const struct lwtunnel_encap_ops ip6_tun_lwt_ops = {
        .fill_encap = ip6_tun_fill_encap_info,
        .get_encap_size = ip6_tun_encap_nlsize,
        .cmp_encap = ip_tun_cmp_encap,
+       .owner = THIS_MODULE,
 };
 
 void __init ip_tunnel_core_init(void)
index a6b8c1a4102ba7ab07efbcf504fa7ca4025c6f19..0a783cd73faf25d9ec4d7605759038e4e0aef345 100644 (file)
@@ -144,7 +144,12 @@ clusterip_config_find_get(struct net *net, __be32 clusterip, int entry)
        rcu_read_lock_bh();
        c = __clusterip_config_find(net, clusterip);
        if (c) {
-               if (!c->pde || unlikely(!atomic_inc_not_zero(&c->refcount)))
+#ifdef CONFIG_PROC_FS
+               if (!c->pde)
+                       c = NULL;
+               else
+#endif
+               if (unlikely(!atomic_inc_not_zero(&c->refcount)))
                        c = NULL;
                else if (entry)
                        atomic_inc(&c->entries);
index f273098e48fd5bbe0ffb95c0daeb315c3a6f06f1..37fb9552e85898d0ee9b311f22af31563c621de7 100644 (file)
@@ -63,10 +63,10 @@ static bool rpfilter_lookup_reverse(struct net *net, struct flowi4 *fl4,
        return dev_match || flags & XT_RPFILTER_LOOSE;
 }
 
-static bool rpfilter_is_local(const struct sk_buff *skb)
+static bool
+rpfilter_is_loopback(const struct sk_buff *skb, const struct net_device *in)
 {
-       const struct rtable *rt = skb_rtable(skb);
-       return rt && (rt->rt_flags & RTCF_LOCAL);
+       return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
 }
 
 static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
@@ -79,7 +79,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
        info = par->matchinfo;
        invert = info->flags & XT_RPFILTER_INVERT;
 
-       if (rpfilter_is_local(skb))
+       if (rpfilter_is_loopback(skb, xt_in(par)))
                return true ^ invert;
 
        iph = ip_hdr(skb);
index fd8220213afc36375f83d0af902443cf2f5a4a8f..146d86105183e1a456a0f17ed6bb5371aa1e8f76 100644 (file)
@@ -126,6 +126,8 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
        /* ip_route_me_harder expects skb->dst to be set */
        skb_dst_set_noref(nskb, skb_dst(oldskb));
 
+       nskb->mark = IP4_REPLY_MARK(net, oldskb->mark);
+
        skb_reserve(nskb, LL_MAX_HEADER);
        niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
                                   ip4_dst_hoplimit(skb_dst(nskb)));
index 965b1a161369323e37dd5f103c34cbc3e4e901b5..2981291910dd2cac2d508fcde89083afc22affd4 100644 (file)
@@ -26,13 +26,6 @@ static __be32 get_saddr(__be32 addr)
        return addr;
 }
 
-static bool fib4_is_local(const struct sk_buff *skb)
-{
-       const struct rtable *rt = skb_rtable(skb);
-
-       return rt && (rt->rt_flags & RTCF_LOCAL);
-}
-
 #define DSCP_BITS     0xfc
 
 void nft_fib4_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
@@ -95,8 +88,10 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
        else
                oif = NULL;
 
-       if (nft_hook(pkt) == NF_INET_PRE_ROUTING && fib4_is_local(pkt->skb)) {
-               nft_fib_store_result(dest, priv->result, pkt, LOOPBACK_IFINDEX);
+       if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
+           nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
+               nft_fib_store_result(dest, priv->result, pkt,
+                                    nft_in(pkt)->ifindex);
                return;
        }
 
@@ -131,7 +126,7 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
        switch (res.type) {
        case RTN_UNICAST:
                break;
-       case RTN_LOCAL: /* should not appear here, see fib4_is_local() above */
+       case RTN_LOCAL: /* Should not see RTN_LOCAL here */
                return;
        default:
                break;
index 86cca610f4c2c368476dee3602d1f74ac2cd5794..68d77b1f1495bb8dace1f6aa9c0e9a6ee5b2e5dd 100644 (file)
@@ -642,6 +642,8 @@ static int ping_v4_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh,
 {
        struct sk_buff *skb = skb_peek(&sk->sk_write_queue);
 
+       if (!skb)
+               return 0;
        pfh->wcheck = csum_partial((char *)&pfh->icmph,
                sizeof(struct icmphdr), pfh->wcheck);
        pfh->icmph.checksum = csum_fold(pfh->wcheck);
index 4a044964da6670829e5c47fef52d2cd76360b59f..0efb4c7f6704f662b6c762e48698a41564add2a4 100644 (file)
@@ -770,6 +770,12 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
                                ret = -EAGAIN;
                                break;
                        }
+                       /* if __tcp_splice_read() got nothing while we have
+                        * an skb in receive queue, we do not want to loop.
+                        * This might happen with URG data.
+                        */
+                       if (!skb_queue_empty(&sk->sk_receive_queue))
+                               break;
                        sk_wait_data(sk, &timeo, NULL);
                        if (signal_pending(current)) {
                                ret = sock_intr_errno(timeo);
index f51919535ca763d54c25a48534256150de7b66a7..dd2560c83a8592359af70919bddb4a628630b0b3 100644 (file)
@@ -205,6 +205,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
         * scaled. So correct it appropriately.
         */
        tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
+       tp->max_window = tp->snd_wnd;
 
        /* Activate the retrans timer so that SYNACK can be retransmitted.
         * The request socket is not added to the ehash
index 6c790754ae3ebfad5d31499eabbed7b5c3b360c4..41dcbd568cbe2403f2a9e659669afe462a42e228 100644 (file)
@@ -5078,7 +5078,7 @@ static void tcp_check_space(struct sock *sk)
        if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
                sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
                /* pairs with tcp_poll() */
-               smp_mb__after_atomic();
+               smp_mb();
                if (sk->sk_socket &&
                    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
                        tcp_new_space(sk);
index 1d5331a1b1dc2677316148ba9852c191e7ed0fd4..8ce50dc3ab8cac821b8a2c3e0d31f0aa42f5c9d5 100644 (file)
@@ -2518,9 +2518,11 @@ u32 __tcp_select_window(struct sock *sk)
        int full_space = min_t(int, tp->window_clamp, allowed_space);
        int window;
 
-       if (mss > full_space)
+       if (unlikely(mss > full_space)) {
                mss = full_space;
-
+               if (mss <= 0)
+                       return 0;
+       }
        if (free_space < (full_space >> 1)) {
                icsk->icsk_ack.quick = 0;
 
index f6c50af24a64737672f7ede2ff41158bfed5f1b4..3d063eb3784828b142874c92fd2db026bea0f3b3 100644 (file)
@@ -117,7 +117,7 @@ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
             (fwmark > 0 && skb->mark == fwmark)) &&
            (full || tp->snd_cwnd != tcp_probe.lastcwnd)) {
 
-               spin_lock(&tcp_probe.lock);
+               spin_lock_bh(&tcp_probe.lock);
                /* If log fills, just silently drop */
                if (tcp_probe_avail() > 1) {
                        struct tcp_log *p = tcp_probe.log + tcp_probe.head;
@@ -157,7 +157,7 @@ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                        tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1);
                }
                tcp_probe.lastcwnd = tp->snd_cwnd;
-               spin_unlock(&tcp_probe.lock);
+               spin_unlock_bh(&tcp_probe.lock);
 
                wake_up(&tcp_probe.wait);
        }
index 1307a7c2e5445d37d1d1c4f5fbfd5bf7c29040d7..8aab7d78d25bc6eaa42dcc960cdbd5086f614cad 100644 (file)
@@ -1501,7 +1501,7 @@ try_again:
        return err;
 
 csum_copy_err:
-       if (!__sk_queue_drop_skb(sk, skb, flags)) {
+       if (!__sk_queue_drop_skb(sk, skb, flags, udp_skb_destructor)) {
                UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
                UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
        }
index c1e124bc8e1e3fcab42290acd8888e42d93ccf50..a7bcc0ab5e99543a08410abe6ff3dbfc9b3753b7 100644 (file)
@@ -3386,9 +3386,15 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                        }
 
                        if (idev) {
-                               if (idev->if_flags & IF_READY)
-                                       /* device is already configured. */
+                               if (idev->if_flags & IF_READY) {
+                                       /* device is already configured -
+                                        * but resend MLD reports, we might
+                                        * have roamed and need to update
+                                        * multicast snooping switches
+                                        */
+                                       ipv6_mc_up(idev);
                                        break;
+                               }
                                idev->if_flags |= IF_READY;
                        }
 
@@ -4009,6 +4015,12 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id)
 
        if (bump_id)
                rt_genid_bump_ipv6(dev_net(dev));
+
+       /* Make sure that a new temporary address will be created
+        * before this temporary address becomes deprecated.
+        */
+       if (ifp->flags & IFA_F_TEMPORARY)
+               addrconf_verify_rtnl();
 }
 
 static void addrconf_dad_run(struct inet6_dev *idev)
@@ -5540,8 +5552,7 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
        struct net_device *dev;
        struct inet6_dev *idev;
 
-       rcu_read_lock();
-       for_each_netdev_rcu(net, dev) {
+       for_each_netdev(net, dev) {
                idev = __in6_dev_get(dev);
                if (idev) {
                        int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
@@ -5550,7 +5561,6 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
                                dev_disable_change(idev);
                }
        }
-       rcu_read_unlock();
 }
 
 static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
index a3eaafd8710091c0484a5c608862d13808d612b3..eec27f87efaca15133cf1d5225e37e6a2f6a6f8a 100644 (file)
@@ -167,18 +167,22 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
        if (np->sndflow)
                fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
 
-       addr_type = ipv6_addr_type(&usin->sin6_addr);
-
-       if (addr_type == IPV6_ADDR_ANY) {
+       if (ipv6_addr_any(&usin->sin6_addr)) {
                /*
                 *      connect to self
                 */
-               usin->sin6_addr.s6_addr[15] = 0x01;
+               if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
+                       ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
+                                              &usin->sin6_addr);
+               else
+                       usin->sin6_addr = in6addr_loopback;
        }
 
+       addr_type = ipv6_addr_type(&usin->sin6_addr);
+
        daddr = &usin->sin6_addr;
 
-       if (addr_type == IPV6_ADDR_MAPPED) {
+       if (addr_type & IPV6_ADDR_MAPPED) {
                struct sockaddr_in sin;
 
                if (__ipv6_only_sock(sk)) {
index e4198502fd98ce55c1ab6d4d9767b8506b2cf707..275cac628a95066f0a27e93f5015ddeb0172c28c 100644 (file)
@@ -327,7 +327,6 @@ static int ipv6_srh_rcv(struct sk_buff *skb)
        struct ipv6_sr_hdr *hdr;
        struct inet6_dev *idev;
        struct in6_addr *addr;
-       bool cleanup = false;
        int accept_seg6;
 
        hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);
@@ -351,11 +350,7 @@ static int ipv6_srh_rcv(struct sk_buff *skb)
 #endif
 
 looped_back:
-       if (hdr->segments_left > 0) {
-               if (hdr->nexthdr != NEXTHDR_IPV6 && hdr->segments_left == 1 &&
-                   sr_has_cleanup(hdr))
-                       cleanup = true;
-       } else {
+       if (hdr->segments_left == 0) {
                if (hdr->nexthdr == NEXTHDR_IPV6) {
                        int offset = (hdr->hdrlen + 1) << 3;
 
@@ -418,21 +413,6 @@ looped_back:
 
        ipv6_hdr(skb)->daddr = *addr;
 
-       if (cleanup) {
-               int srhlen = (hdr->hdrlen + 1) << 3;
-               int nh = hdr->nexthdr;
-
-               skb_pull_rcsum(skb, sizeof(struct ipv6hdr) + srhlen);
-               memmove(skb_network_header(skb) + srhlen,
-                       skb_network_header(skb),
-                       (unsigned char *)hdr - skb_network_header(skb));
-               skb->network_header += srhlen;
-               ipv6_hdr(skb)->nexthdr = nh;
-               ipv6_hdr(skb)->payload_len = htons(skb->len -
-                                                  sizeof(struct ipv6hdr));
-               skb_push_rcsum(skb, sizeof(struct ipv6hdr));
-       }
-
        skb_dst_drop(skb);
 
        ip6_route_input(skb);
@@ -453,13 +433,8 @@ looped_back:
                }
                ipv6_hdr(skb)->hop_limit--;
 
-               /* be sure that srh is still present before reinjecting */
-               if (!cleanup) {
-                       skb_pull(skb, sizeof(struct ipv6hdr));
-                       goto looped_back;
-               }
-               skb_set_transport_header(skb, sizeof(struct ipv6hdr));
-               IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
+               skb_pull(skb, sizeof(struct ipv6hdr));
+               goto looped_back;
        }
 
        dst_input(skb);
index a7bc54ab46e2d10ab18abdc3fcef511f06574343..13b5e85fe0d56471ab792b1e75801def3800ee9c 100644 (file)
@@ -238,6 +238,7 @@ static const struct lwtunnel_encap_ops ila_encap_ops = {
        .fill_encap = ila_fill_encap_info,
        .get_encap_size = ila_encap_nlsize,
        .cmp_encap = ila_encap_cmp,
+       .owner = THIS_MODULE,
 };
 
 int ila_lwt_init(void)
index 7396e75e161b83ee2b8a426bce0eb770884960dd..75c308239243305a508e62d01814d88da7248018 100644 (file)
@@ -176,7 +176,7 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused
        /* Restore final destination back after routing done */
        fl6.daddr = sk->sk_v6_daddr;
 
-       res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
+       res = ip6_xmit(sk, skb, &fl6, sk->sk_mark, rcu_dereference(np->opt),
                       np->tclass);
        rcu_read_unlock();
        return res;
index 75b6108234dd05a54af0ae51c7c11eaf1ca26d75..630b73be599977599c0021849fc6eb689cfefad7 100644 (file)
@@ -367,35 +367,37 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
 
 
 static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-               u8 type, u8 code, int offset, __be32 info)
+                      u8 type, u8 code, int offset, __be32 info)
 {
-       const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
-       __be16 *p = (__be16 *)(skb->data + offset);
-       int grehlen = offset + 4;
+       const struct gre_base_hdr *greh;
+       const struct ipv6hdr *ipv6h;
+       int grehlen = sizeof(*greh);
        struct ip6_tnl *t;
+       int key_off = 0;
        __be16 flags;
+       __be32 key;
 
-       flags = p[0];
-       if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
-               if (flags&(GRE_VERSION|GRE_ROUTING))
-                       return;
-               if (flags&GRE_KEY) {
-                       grehlen += 4;
-                       if (flags&GRE_CSUM)
-                               grehlen += 4;
-               }
+       if (!pskb_may_pull(skb, offset + grehlen))
+               return;
+       greh = (const struct gre_base_hdr *)(skb->data + offset);
+       flags = greh->flags;
+       if (flags & (GRE_VERSION | GRE_ROUTING))
+               return;
+       if (flags & GRE_CSUM)
+               grehlen += 4;
+       if (flags & GRE_KEY) {
+               key_off = grehlen + offset;
+               grehlen += 4;
        }
 
-       /* If only 8 bytes returned, keyed message will be dropped here */
-       if (!pskb_may_pull(skb, grehlen))
+       if (!pskb_may_pull(skb, offset + grehlen))
                return;
        ipv6h = (const struct ipv6hdr *)skb->data;
-       p = (__be16 *)(skb->data + offset);
+       greh = (const struct gre_base_hdr *)(skb->data + offset);
+       key = key_off ? *(__be32 *)(skb->data + key_off) : 0;
 
        t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
-                               flags & GRE_KEY ?
-                               *(((__be32 *)p) + (grehlen / 4) - 1) : 0,
-                               p[1]);
+                                key, greh->protocol);
        if (!t)
                return;
 
@@ -582,6 +584,9 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
                return -1;
 
        offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
+       /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
+       ipv6h = ipv6_hdr(skb);
+
        if (offset > 0) {
                struct ipv6_tlv_tnl_enc_lim *tel;
                tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
index 38122d04fadc646c27a5ccdf0eef5eb6d7923a27..7cebee58e55b7f6f23279ac2515a69df936b5712 100644 (file)
@@ -172,7 +172,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
  * which are using proper atomic operations or spinlocks.
  */
 int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
-            struct ipv6_txoptions *opt, int tclass)
+            __u32 mark, struct ipv6_txoptions *opt, int tclass)
 {
        struct net *net = sock_net(sk);
        const struct ipv6_pinfo *np = inet6_sk(sk);
@@ -240,7 +240,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
 
        skb->protocol = htons(ETH_P_IPV6);
        skb->priority = sk->sk_priority;
-       skb->mark = sk->sk_mark;
+       skb->mark = mark;
 
        mtu = dst_mtu(dst);
        if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
@@ -1021,6 +1021,11 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
                }
        }
 #endif
+       if (ipv6_addr_v4mapped(&fl6->saddr) &&
+           !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) {
+               err = -EAFNOSUPPORT;
+               goto out_err_release;
+       }
 
        return 0;
 
@@ -1344,7 +1349,7 @@ emsgsize:
         */
        if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
            headersize == sizeof(struct ipv6hdr) &&
-           length < mtu - headersize &&
+           length <= mtu - headersize &&
            !(flags & MSG_MORE) &&
            rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
                csummode = CHECKSUM_PARTIAL;
index 753d6d0860fb14c100ab8b20799782ab81602635..75fac933c209a0f430279dea10b5dd2426a7ed31 100644 (file)
@@ -400,18 +400,19 @@ ip6_tnl_dev_uninit(struct net_device *dev)
 
 __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
 {
-       const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
-       __u8 nexthdr = ipv6h->nexthdr;
-       __u16 off = sizeof(*ipv6h);
+       const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
+       unsigned int nhoff = raw - skb->data;
+       unsigned int off = nhoff + sizeof(*ipv6h);
+       u8 next, nexthdr = ipv6h->nexthdr;
 
        while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
-               __u16 optlen = 0;
                struct ipv6_opt_hdr *hdr;
-               if (raw + off + sizeof(*hdr) > skb->data &&
-                   !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr)))
+               u16 optlen;
+
+               if (!pskb_may_pull(skb, off + sizeof(*hdr)))
                        break;
 
-               hdr = (struct ipv6_opt_hdr *) (raw + off);
+               hdr = (struct ipv6_opt_hdr *)(skb->data + off);
                if (nexthdr == NEXTHDR_FRAGMENT) {
                        struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
                        if (frag_hdr->frag_off)
@@ -422,20 +423,29 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
                } else {
                        optlen = ipv6_optlen(hdr);
                }
+               /* cache hdr->nexthdr, since pskb_may_pull() might
+                * invalidate hdr
+                */
+               next = hdr->nexthdr;
                if (nexthdr == NEXTHDR_DEST) {
-                       __u16 i = off + 2;
+                       u16 i = 2;
+
+                       /* Remember : hdr is no longer valid at this point. */
+                       if (!pskb_may_pull(skb, off + optlen))
+                               break;
+
                        while (1) {
                                struct ipv6_tlv_tnl_enc_lim *tel;
 
                                /* No more room for encapsulation limit */
-                               if (i + sizeof (*tel) > off + optlen)
+                               if (i + sizeof(*tel) > optlen)
                                        break;
 
-                               tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i];
+                               tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
                                /* return index of option if found and valid */
                                if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
                                    tel->length == 1)
-                                       return i;
+                                       return i + off - nhoff;
                                /* else jump to next option */
                                if (tel->type)
                                        i += tel->length + 2;
@@ -443,7 +453,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
                                        i++;
                        }
                }
-               nexthdr = hdr->nexthdr;
+               nexthdr = next;
                off += optlen;
        }
        return 0;
@@ -1303,6 +1313,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
                fl6.flowlabel = key->label;
        } else {
                offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
+               /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
+               ipv6h = ipv6_hdr(skb);
                if (offset > 0) {
                        struct ipv6_tlv_tnl_enc_lim *tel;
 
index 7139fffd61b6f764a9d0ae02ed41365afa3ab55c..1bdc703cb9668bd77690c3d8f1ec0062d7b88c43 100644 (file)
@@ -779,6 +779,7 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
                                psf->sf_crcount = im->mca_crcount;
                }
                in6_dev_put(pmc->idev);
+               kfree(pmc);
        }
        spin_unlock_bh(&im->mca_lock);
 }
index d5263dc364a97a2a5530e69defd46df225b004b1..b12e61b7b16ce9f3f98a0906558c98803a48a9a3 100644 (file)
@@ -72,10 +72,10 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
        return ret;
 }
 
-static bool rpfilter_is_local(const struct sk_buff *skb)
+static bool
+rpfilter_is_loopback(const struct sk_buff *skb, const struct net_device *in)
 {
-       const struct rt6_info *rt = (const void *) skb_dst(skb);
-       return rt && (rt->rt6i_flags & RTF_LOCAL);
+       return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
 }
 
 static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
@@ -85,7 +85,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
        struct ipv6hdr *iph;
        bool invert = info->flags & XT_RPFILTER_INVERT;
 
-       if (rpfilter_is_local(skb))
+       if (rpfilter_is_loopback(skb, xt_in(par)))
                return true ^ invert;
 
        iph = ipv6_hdr(skb);
index 10090400c72f19b7dd21d6543a2c1d740a9bd595..eedee5d108d98422eab5753d7c619b269e07685e 100644 (file)
@@ -157,6 +157,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
        fl6.fl6_sport = otcph->dest;
        fl6.fl6_dport = otcph->source;
        fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev);
+       fl6.flowi6_mark = IP6_REPLY_MARK(net, oldskb->mark);
        security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
        dst = ip6_route_output(net, NULL, &fl6);
        if (dst->error) {
@@ -180,6 +181,8 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
 
        skb_dst_set(nskb, dst);
 
+       nskb->mark = fl6.flowi6_mark;
+
        skb_reserve(nskb, hh_len + dst->header_len);
        ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
                                    ip6_dst_hoplimit(dst));
index c947aad8bcc620e87cacd942d146cdfb7ad2a73e..765facf03d45c47b9913b1adcdaf59b6fe09383c 100644 (file)
 #include <net/ip6_fib.h>
 #include <net/ip6_route.h>
 
-static bool fib6_is_local(const struct sk_buff *skb)
-{
-       const struct rt6_info *rt = (const void *)skb_dst(skb);
-
-       return rt && (rt->rt6i_flags & RTF_LOCAL);
-}
-
 static int get_ifindex(const struct net_device *dev)
 {
        return dev ? dev->ifindex : 0;
@@ -164,8 +157,10 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
 
        lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif);
 
-       if (nft_hook(pkt) == NF_INET_PRE_ROUTING && fib6_is_local(pkt->skb)) {
-               nft_fib_store_result(dest, priv->result, pkt, LOOPBACK_IFINDEX);
+       if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
+           nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
+               nft_fib_store_result(dest, priv->result, pkt,
+                                    nft_in(pkt)->ifindex);
                return;
        }
 
index 4f6b067c8753a541bc20d6d9f6f4c5009c8956cb..7ea85370c11c81e8743e6a3086a32a0011f35d17 100644 (file)
@@ -2896,6 +2896,11 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
        if (tb[RTA_MULTIPATH]) {
                cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
                cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
+
+               err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
+                                                    cfg->fc_mp_len);
+               if (err < 0)
+                       goto errout;
        }
 
        if (tb[RTA_PREF]) {
@@ -2909,9 +2914,14 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
        if (tb[RTA_ENCAP])
                cfg->fc_encap = tb[RTA_ENCAP];
 
-       if (tb[RTA_ENCAP_TYPE])
+       if (tb[RTA_ENCAP_TYPE]) {
                cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
 
+               err = lwtunnel_valid_encap_type(cfg->fc_encap_type);
+               if (err < 0)
+                       goto errout;
+       }
+
        if (tb[RTA_EXPIRES]) {
                unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
 
index b172d85c650a376f541ea05d72046c76b8404303..a855eb325b030a666fe92c56a2d432c77d9dfe7a 100644 (file)
@@ -176,6 +176,8 @@ static int seg6_genl_set_tunsrc(struct sk_buff *skb, struct genl_info *info)
 
        val = nla_data(info->attrs[SEG6_ATTR_DST]);
        t_new = kmemdup(val, sizeof(*val), GFP_KERNEL);
+       if (!t_new)
+               return -ENOMEM;
 
        mutex_lock(&sdata->lock);
 
index 03a064803626890ade73073cc12735aec777f9e5..6ef3dfb6e811642f1fc9b680e0b255a9399bb024 100644 (file)
@@ -174,7 +174,7 @@ int seg6_hmac_compute(struct seg6_hmac_info *hinfo, struct ipv6_sr_hdr *hdr,
         * hash function (RadioGatun) with up to 1216 bits
         */
 
-       /* saddr(16) + first_seg(1) + cleanup(1) + keyid(4) + seglist(16n) */
+       /* saddr(16) + first_seg(1) + flags(1) + keyid(4) + seglist(16n) */
        plen = 16 + 1 + 1 + 4 + (hdr->first_segment + 1) * 16;
 
        /* this limit allows for 14 segments */
@@ -186,7 +186,7 @@ int seg6_hmac_compute(struct seg6_hmac_info *hinfo, struct ipv6_sr_hdr *hdr,
         *
         * 1. Source IPv6 address (128 bits)
         * 2. first_segment value (8 bits)
-        * 3. cleanup flag (8 bits: highest bit is cleanup value, others are 0)
+        * 3. Flags (8 bits)
         * 4. HMAC Key ID (32 bits)
         * 5. All segments in the segments list (n * 128 bits)
         */
@@ -202,8 +202,8 @@ int seg6_hmac_compute(struct seg6_hmac_info *hinfo, struct ipv6_sr_hdr *hdr,
        /* first_segment value */
        *off++ = hdr->first_segment;
 
-       /* cleanup flag */
-       *off++ = !!(sr_has_cleanup(hdr)) << 7;
+       /* flags */
+       *off++ = hdr->flags;
 
        /* HMAC Key ID */
        memcpy(off, &hmackeyid, 4);
index 1d60cb132835c9f9089510f035a1ca95e5b1e1a7..c46f8cbf5ab5aa4031d4080d70079e99859d4eb4 100644 (file)
@@ -422,6 +422,7 @@ static const struct lwtunnel_encap_ops seg6_iptun_ops = {
        .fill_encap = seg6_fill_encap_info,
        .get_encap_size = seg6_encap_nlsize,
        .cmp_encap = seg6_encap_cmp,
+       .owner = THIS_MODULE,
 };
 
 int __init seg6_iptunnel_init(void)
index fad992ad4bc83e8fa0dbdae194a4f8b54e28efa2..99853c6e33a8c3def99ecb56e288cce4a38a997b 100644 (file)
@@ -1380,6 +1380,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
        err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
        if (err) {
                free_percpu(dev->tstats);
+               dev->tstats = NULL;
                return err;
        }
 
index 73bc8fc68acdc1ed947bfdb7624b3308a9b70806..4c60c6f71cd30bf18f270c3d994f193ad13045ae 100644 (file)
@@ -148,8 +148,13 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
         *      connect() to INADDR_ANY means loopback (BSD'ism).
         */
 
-       if (ipv6_addr_any(&usin->sin6_addr))
-               usin->sin6_addr.s6_addr[15] = 0x1;
+       if (ipv6_addr_any(&usin->sin6_addr)) {
+               if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
+                       ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
+                                              &usin->sin6_addr);
+               else
+                       usin->sin6_addr = in6addr_loopback;
+       }
 
        addr_type = ipv6_addr_type(&usin->sin6_addr);
 
@@ -188,7 +193,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
         *      TCP over IPv4
         */
 
-       if (addr_type == IPV6_ADDR_MAPPED) {
+       if (addr_type & IPV6_ADDR_MAPPED) {
                u32 exthdrlen = icsk->icsk_ext_hdr_len;
                struct sockaddr_in sin;
 
@@ -469,7 +474,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
                opt = ireq->ipv6_opt;
                if (!opt)
                        opt = rcu_dereference(np->opt);
-               err = ip6_xmit(sk, skb, fl6, opt, np->tclass);
+               err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
                rcu_read_unlock();
                err = net_xmit_eval(err);
        }
@@ -840,7 +845,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
        dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
        if (!IS_ERR(dst)) {
                skb_dst_set(buff, dst);
-               ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
+               ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
                TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
                if (rst)
                        TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
@@ -991,6 +996,16 @@ drop:
        return 0; /* don't send reset */
 }
 
+static void tcp_v6_restore_cb(struct sk_buff *skb)
+{
+       /* We need to move header back to the beginning if xfrm6_policy_check()
+        * and tcp_v6_fill_cb() are going to be called again.
+        * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
+        */
+       memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
+               sizeof(struct inet6_skb_parm));
+}
+
 static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
                                         struct request_sock *req,
                                         struct dst_entry *dst,
@@ -1182,8 +1197,10 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
                                                      sk_gfp_mask(sk, GFP_ATOMIC));
                        consume_skb(ireq->pktopts);
                        ireq->pktopts = NULL;
-                       if (newnp->pktoptions)
+                       if (newnp->pktoptions) {
+                               tcp_v6_restore_cb(newnp->pktoptions);
                                skb_set_owner_r(newnp->pktoptions, newsk);
+                       }
                }
        }
 
@@ -1198,16 +1215,6 @@ out:
        return NULL;
 }
 
-static void tcp_v6_restore_cb(struct sk_buff *skb)
-{
-       /* We need to move header back to the beginning if xfrm6_policy_check()
-        * and tcp_v6_fill_cb() are going to be called again.
-        * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
-        */
-       memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
-               sizeof(struct inet6_skb_parm));
-}
-
 /* The socket must have it's spinlock held when we get
  * here, unless it is a TCP_LISTEN socket.
  *
index 4d5c4eee4b3f506cf030bb9bce20c5b94086e011..221825a9407afebba47106f60729f91e2992158c 100644 (file)
@@ -441,7 +441,7 @@ try_again:
        return err;
 
 csum_copy_err:
-       if (!__sk_queue_drop_skb(sk, skb, flags)) {
+       if (!__sk_queue_drop_skb(sk, skb, flags, udp_skb_destructor)) {
                if (is_udp4) {
                        UDP_INC_STATS(sock_net(sk),
                                      UDP_MIB_CSUMERRORS, is_udplite);
@@ -1033,6 +1033,10 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                        if (addr_len < SIN6_LEN_RFC2133)
                                return -EINVAL;
                        daddr = &sin6->sin6_addr;
+                       if (ipv6_addr_any(daddr) &&
+                           ipv6_addr_v4mapped(&np->saddr))
+                               ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
+                                                      daddr);
                        break;
                case AF_INET:
                        goto do_udp_sendmsg;
index acbe61c7e6831205ca1d725f65263c849ef092e4..160dc89335e207e593401f43c0637a766bdb9b68 100644 (file)
@@ -383,9 +383,6 @@ EXPORT_SYMBOL(hashbin_new);
  *    for deallocating this structure if it's complex. If not the user can
  *    just supply kfree, which should take care of the job.
  */
-#ifdef CONFIG_LOCKDEP
-static int hashbin_lock_depth = 0;
-#endif
 int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
 {
        irda_queue_t* queue;
@@ -396,22 +393,27 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
        IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;);
 
        /* Synchronize */
-       if ( hashbin->hb_type & HB_LOCK ) {
-               spin_lock_irqsave_nested(&hashbin->hb_spinlock, flags,
-                                        hashbin_lock_depth++);
-       }
+       if (hashbin->hb_type & HB_LOCK)
+               spin_lock_irqsave(&hashbin->hb_spinlock, flags);
 
        /*
         *  Free the entries in the hashbin, TODO: use hashbin_clear when
         *  it has been shown to work
         */
        for (i = 0; i < HASHBIN_SIZE; i ++ ) {
-               queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
-               while (queue ) {
-                       if (free_func)
-                               (*free_func)(queue);
-                       queue = dequeue_first(
-                               (irda_queue_t**) &hashbin->hb_queue[i]);
+               while (1) {
+                       queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
+
+                       if (!queue)
+                               break;
+
+                       if (free_func) {
+                               if (hashbin->hb_type & HB_LOCK)
+                                       spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
+                               free_func(queue);
+                               if (hashbin->hb_type & HB_LOCK)
+                                       spin_lock_irqsave(&hashbin->hb_spinlock, flags);
+                       }
                }
        }
 
@@ -420,12 +422,8 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
        hashbin->magic = ~HB_MAGIC;
 
        /* Release lock */
-       if ( hashbin->hb_type & HB_LOCK) {
+       if (hashbin->hb_type & HB_LOCK)
                spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
-#ifdef CONFIG_LOCKDEP
-               hashbin_lock_depth--;
-#endif
-       }
 
        /*
         *  Free the hashbin structure
index 7e08a4d3d77d0650e9f0c50cd0ab9a8568147f37..a646f348124095c1bdff2d14a77ea5c0ac892bc1 100644 (file)
@@ -929,23 +929,25 @@ static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
                        goto out_error;
        }
 
-       /* New message, alloc head skb */
-       head = alloc_skb(0, sk->sk_allocation);
-       while (!head) {
-               kcm_push(kcm);
-               err = sk_stream_wait_memory(sk, &timeo);
-               if (err)
-                       goto out_error;
-
+       if (msg_data_left(msg)) {
+               /* New message, alloc head skb */
                head = alloc_skb(0, sk->sk_allocation);
-       }
+               while (!head) {
+                       kcm_push(kcm);
+                       err = sk_stream_wait_memory(sk, &timeo);
+                       if (err)
+                               goto out_error;
 
-       skb = head;
+                       head = alloc_skb(0, sk->sk_allocation);
+               }
 
-       /* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
-        * csum_and_copy_from_iter from skb_do_copy_data_nocache.
-        */
-       skb->ip_summed = CHECKSUM_UNNECESSARY;
+               skb = head;
+
+               /* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
+                * csum_and_copy_from_iter from skb_do_copy_data_nocache.
+                */
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+       }
 
 start:
        while (msg_data_left(msg)) {
@@ -1018,10 +1020,12 @@ wait_for_memory:
        if (eor) {
                bool not_busy = skb_queue_empty(&sk->sk_write_queue);
 
-               /* Message complete, queue it on send buffer */
-               __skb_queue_tail(&sk->sk_write_queue, head);
-               kcm->seq_skb = NULL;
-               KCM_STATS_INCR(kcm->stats.tx_msgs);
+               if (head) {
+                       /* Message complete, queue it on send buffer */
+                       __skb_queue_tail(&sk->sk_write_queue, head);
+                       kcm->seq_skb = NULL;
+                       KCM_STATS_INCR(kcm->stats.tx_msgs);
+               }
 
                if (msg->msg_flags & MSG_BATCH) {
                        kcm->tx_wait_more = true;
@@ -1040,8 +1044,10 @@ wait_for_memory:
        } else {
                /* Message not complete, save state */
 partial_message:
-               kcm->seq_skb = head;
-               kcm_tx_msg(head)->last_skb = skb;
+               if (head) {
+                       kcm->seq_skb = head;
+                       kcm_tx_msg(head)->last_skb = skb;
+               }
        }
 
        KCM_STATS_ADD(kcm->stats.tx_bytes, copied);
index 8f560f7140a05694c13904d9b171ba67d9d11292..aebf281d09eeb31c531eb624bd2ddd78cab8da9b 100644 (file)
@@ -263,6 +263,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb,
 int l2tp_nl_register_ops(enum l2tp_pwtype pw_type,
                         const struct l2tp_nl_cmd_ops *ops);
 void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
+int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg);
 
 /* Session reference counts. Incremented when code obtains a reference
  * to a session.
index 3d73278b86ca34bfbd774dc8f52e490169445e1b..28c21546d5b60dcd07bbf6347389e97c918bf40f 100644 (file)
@@ -11,6 +11,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <asm/ioctls.h>
 #include <linux/icmp.h>
 #include <linux/module.h>
 #include <linux/skbuff.h>
@@ -553,6 +554,30 @@ out:
        return err ? err : copied;
 }
 
+int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg)
+{
+       struct sk_buff *skb;
+       int amount;
+
+       switch (cmd) {
+       case SIOCOUTQ:
+               amount = sk_wmem_alloc_get(sk);
+               break;
+       case SIOCINQ:
+               spin_lock_bh(&sk->sk_receive_queue.lock);
+               skb = skb_peek(&sk->sk_receive_queue);
+               amount = skb ? skb->len : 0;
+               spin_unlock_bh(&sk->sk_receive_queue.lock);
+               break;
+
+       default:
+               return -ENOIOCTLCMD;
+       }
+
+       return put_user(amount, (int __user *)arg);
+}
+EXPORT_SYMBOL(l2tp_ioctl);
+
 static struct proto l2tp_ip_prot = {
        .name              = "L2TP/IP",
        .owner             = THIS_MODULE,
@@ -561,7 +586,7 @@ static struct proto l2tp_ip_prot = {
        .bind              = l2tp_ip_bind,
        .connect           = l2tp_ip_connect,
        .disconnect        = l2tp_ip_disconnect,
-       .ioctl             = udp_ioctl,
+       .ioctl             = l2tp_ioctl,
        .destroy           = l2tp_ip_destroy_sock,
        .setsockopt        = ip_setsockopt,
        .getsockopt        = ip_getsockopt,
index 331ccf5a7bad80e011997e071489d7775b0c68c6..f47c45250f86c9189e0a6bbfd92b21cbe2069406 100644 (file)
@@ -722,7 +722,7 @@ static struct proto l2tp_ip6_prot = {
        .bind              = l2tp_ip6_bind,
        .connect           = l2tp_ip6_connect,
        .disconnect        = l2tp_ip6_disconnect,
-       .ioctl             = udp_ioctl,
+       .ioctl             = l2tp_ioctl,
        .destroy           = l2tp_ip6_destroy_sock,
        .setsockopt        = ipv6_setsockopt,
        .getsockopt        = ipv6_getsockopt,
index 3e821daf9dd4a2fbf00550591e92b153efd4a73a..8bc5a1bd2d453542df31506f543feb64b64cdd96 100644 (file)
@@ -821,7 +821,10 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
                 * another trick required to cope with how the PROCOM state
                 * machine works. -acme
                 */
+               skb_orphan(skb);
+               sock_hold(sk);
                skb->sk = sk;
+               skb->destructor = sock_efree;
        }
        if (!sock_owned_by_user(sk))
                llc_conn_rcv(sk, skb);
index d0e1e804ebd73dcebcf2f930b921233a49b0f454..5404d0d195cc581613e356b75bd70321e617673e 100644 (file)
@@ -290,7 +290,10 @@ static void llc_sap_rcv(struct llc_sap *sap, struct sk_buff *skb,
 
        ev->type   = LLC_SAP_EV_TYPE_PDU;
        ev->reason = 0;
+       skb_orphan(skb);
+       sock_hold(sk);
        skb->sk = sk;
+       skb->destructor = sock_efree;
        llc_sap_state_process(sap, skb);
 }
 
index ecfdd97758a386ed9d642a3fc141f2ec6da13db2..5c3af5eb405232167bbd62a13b4d1f37370d6bc0 100644 (file)
@@ -124,7 +124,7 @@ static int aes_siv_encrypt(const u8 *key, size_t key_len,
 
        /* CTR */
 
-       tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, 0);
+       tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm2)) {
                kfree(tmp);
                return PTR_ERR(tfm2);
@@ -183,7 +183,7 @@ static int aes_siv_decrypt(const u8 *key, size_t key_len,
 
        /* CTR */
 
-       tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, 0);
+       tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm2))
                return PTR_ERR(tfm2);
        /* K2 for CTR */
@@ -272,7 +272,7 @@ int fils_encrypt_assoc_req(struct sk_buff *skb,
        crypt_len = skb->data + skb->len - encr;
        skb_put(skb, AES_BLOCK_SIZE);
        return aes_siv_encrypt(assoc_data->fils_kek, assoc_data->fils_kek_len,
-                              encr, crypt_len, 1, addr, len, encr);
+                              encr, crypt_len, 5, addr, len, encr);
 }
 
 int fils_decrypt_assoc_resp(struct ieee80211_sub_if_data *sdata,
index 42120d965263d2ec1719211da37b7900814e4122..50e1b7f78bd49605d2dbca4c215befecc1d8d001 100644 (file)
@@ -339,7 +339,7 @@ int mesh_add_vendor_ies(struct ieee80211_sub_if_data *sdata,
        /* fast-forward to vendor IEs */
        offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0);
 
-       if (offset) {
+       if (offset < ifmsh->ie_len) {
                len = ifmsh->ie_len - offset;
                data = ifmsh->ie + offset;
                if (skb_tailroom(skb) < len)
index 9e2641d4558753b7bf746388a971ac337cef9349..206698bc93f406939bb5d883b6ab2f04bc1a3bed 100644 (file)
@@ -40,8 +40,6 @@ void rate_control_rate_init(struct sta_info *sta)
 
        ieee80211_sta_set_rx_nss(sta);
 
-       ieee80211_recalc_min_chandef(sta->sdata);
-
        if (!ref)
                return;
 
index 15fe97644ffe048c9b1d5818c0b1aec56eb988f4..5b77377e5a15474e39037be5e6e873ebceb33555 100644 (file)
@@ -98,18 +98,19 @@ bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 }
 EXPORT_SYMBOL_GPL(mpls_pkt_too_big);
 
-static u32 mpls_multipath_hash(struct mpls_route *rt,
-                              struct sk_buff *skb, bool bos)
+static u32 mpls_multipath_hash(struct mpls_route *rt, struct sk_buff *skb)
 {
        struct mpls_entry_decoded dec;
+       unsigned int mpls_hdr_len = 0;
        struct mpls_shim_hdr *hdr;
        bool eli_seen = false;
        int label_index;
        u32 hash = 0;
 
-       for (label_index = 0; label_index < MAX_MP_SELECT_LABELS && !bos;
+       for (label_index = 0; label_index < MAX_MP_SELECT_LABELS;
             label_index++) {
-               if (!pskb_may_pull(skb, sizeof(*hdr) * label_index))
+               mpls_hdr_len += sizeof(*hdr);
+               if (!pskb_may_pull(skb, mpls_hdr_len))
                        break;
 
                /* Read and decode the current label */
@@ -134,37 +135,38 @@ static u32 mpls_multipath_hash(struct mpls_route *rt,
                        eli_seen = true;
                }
 
-               bos = dec.bos;
-               if (bos && pskb_may_pull(skb, sizeof(*hdr) * label_index +
-                                        sizeof(struct iphdr))) {
+               if (!dec.bos)
+                       continue;
+
+               /* found bottom label; does skb have room for a header? */
+               if (pskb_may_pull(skb, mpls_hdr_len + sizeof(struct iphdr))) {
                        const struct iphdr *v4hdr;
 
-                       v4hdr = (const struct iphdr *)(mpls_hdr(skb) +
-                                                      label_index);
+                       v4hdr = (const struct iphdr *)(hdr + 1);
                        if (v4hdr->version == 4) {
                                hash = jhash_3words(ntohl(v4hdr->saddr),
                                                    ntohl(v4hdr->daddr),
                                                    v4hdr->protocol, hash);
                        } else if (v4hdr->version == 6 &&
-                               pskb_may_pull(skb, sizeof(*hdr) * label_index +
-                                             sizeof(struct ipv6hdr))) {
+                                  pskb_may_pull(skb, mpls_hdr_len +
+                                                sizeof(struct ipv6hdr))) {
                                const struct ipv6hdr *v6hdr;
 
-                               v6hdr = (const struct ipv6hdr *)(mpls_hdr(skb) +
-                                                               label_index);
-
+                               v6hdr = (const struct ipv6hdr *)(hdr + 1);
                                hash = __ipv6_addr_jhash(&v6hdr->saddr, hash);
                                hash = __ipv6_addr_jhash(&v6hdr->daddr, hash);
                                hash = jhash_1word(v6hdr->nexthdr, hash);
                        }
                }
+
+               break;
        }
 
        return hash;
 }
 
 static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
-                                            struct sk_buff *skb, bool bos)
+                                            struct sk_buff *skb)
 {
        int alive = ACCESS_ONCE(rt->rt_nhn_alive);
        u32 hash = 0;
@@ -180,7 +182,7 @@ static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
        if (alive <= 0)
                return NULL;
 
-       hash = mpls_multipath_hash(rt, skb, bos);
+       hash = mpls_multipath_hash(rt, skb);
        nh_index = hash % alive;
        if (alive == rt->rt_nhn)
                goto out;
@@ -278,17 +280,11 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
        hdr = mpls_hdr(skb);
        dec = mpls_entry_decode(hdr);
 
-       /* Pop the label */
-       skb_pull(skb, sizeof(*hdr));
-       skb_reset_network_header(skb);
-
-       skb_orphan(skb);
-
        rt = mpls_route_input_rcu(net, dec.label);
        if (!rt)
                goto drop;
 
-       nh = mpls_select_multipath(rt, skb, dec.bos);
+       nh = mpls_select_multipath(rt, skb);
        if (!nh)
                goto drop;
 
@@ -297,6 +293,12 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
        if (!mpls_output_possible(out_dev))
                goto drop;
 
+       /* Pop the label */
+       skb_pull(skb, sizeof(*hdr));
+       skb_reset_network_header(skb);
+
+       skb_orphan(skb);
+
        if (skb_warn_if_lro(skb))
                goto drop;
 
index 2f7ccd93441671d6aac3fd690cfc61283848579a..1d281c1ff7c10b3ae6e0245e2b95cc404dd791c4 100644 (file)
@@ -215,6 +215,7 @@ static const struct lwtunnel_encap_ops mpls_iptun_ops = {
        .fill_encap = mpls_fill_encap_info,
        .get_encap_size = mpls_encap_nlsize,
        .cmp_encap = mpls_encap_cmp,
+       .owner = THIS_MODULE,
 };
 
 static int __init mpls_iptunnel_init(void)
index 63729b489c2c17608575e4f58adb0c264e182d0b..bbc45f8a7b2de6801eab367fa7f3611e23b92b9e 100644 (file)
@@ -494,7 +494,7 @@ config NFT_CT
        depends on NF_CONNTRACK
        tristate "Netfilter nf_tables conntrack module"
        help
-         This option adds the "meta" expression that you can use to match
+         This option adds the "ct" expression that you can use to match
          connection tracking information such as the flow state.
 
 config NFT_SET_RBTREE
index 3a073cd9fcf49ed9cfd228a420cc2de928ca4459..4e8083c5e01d1ec631258af169c18aceed101e3a 100644 (file)
@@ -85,11 +85,11 @@ static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
 static __read_mostly bool nf_conntrack_locks_all;
 
 /* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
-#define GC_MAX_BUCKETS_DIV     64u
-/* upper bound of scan intervals */
-#define GC_INTERVAL_MAX                (2 * HZ)
-/* maximum conntracks to evict per gc run */
-#define GC_MAX_EVICTS          256u
+#define GC_MAX_BUCKETS_DIV     128u
+/* upper bound of full table scan */
+#define GC_MAX_SCAN_JIFFIES    (16u * HZ)
+/* desired ratio of entries found to be expired */
+#define GC_EVICT_RATIO 50u
 
 static struct conntrack_gc_work conntrack_gc_work;
 
@@ -938,6 +938,7 @@ static noinline int early_drop(struct net *net, unsigned int _hash)
 
 static void gc_worker(struct work_struct *work)
 {
+       unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
        unsigned int i, goal, buckets = 0, expired_count = 0;
        struct conntrack_gc_work *gc_work;
        unsigned int ratio, scanned = 0;
@@ -979,8 +980,7 @@ static void gc_worker(struct work_struct *work)
                 */
                rcu_read_unlock();
                cond_resched_rcu_qs();
-       } while (++buckets < goal &&
-                expired_count < GC_MAX_EVICTS);
+       } while (++buckets < goal);
 
        if (gc_work->exiting)
                return;
@@ -997,27 +997,25 @@ static void gc_worker(struct work_struct *work)
         * 1. Minimize time until we notice a stale entry
         * 2. Maximize scan intervals to not waste cycles
         *
-        * Normally, expired_count will be 0, this increases the next_run time
-        * to priorize 2) above.
+        * Normally, expire ratio will be close to 0.
         *
-        * As soon as a timed-out entry is found, move towards 1) and increase
-        * the scan frequency.
-        * In case we have lots of evictions next scan is done immediately.
+        * As soon as a sizeable fraction of the entries have expired
+        * increase scan frequency.
         */
        ratio = scanned ? expired_count * 100 / scanned : 0;
-       if (ratio >= 90 || expired_count == GC_MAX_EVICTS) {
-               gc_work->next_gc_run = 0;
-               next_run = 0;
-       } else if (expired_count) {
-               gc_work->next_gc_run /= 2U;
-               next_run = msecs_to_jiffies(1);
+       if (ratio > GC_EVICT_RATIO) {
+               gc_work->next_gc_run = min_interval;
        } else {
-               if (gc_work->next_gc_run < GC_INTERVAL_MAX)
-                       gc_work->next_gc_run += msecs_to_jiffies(1);
+               unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;
 
-               next_run = gc_work->next_gc_run;
+               BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);
+
+               gc_work->next_gc_run += min_interval;
+               if (gc_work->next_gc_run > max)
+                       gc_work->next_gc_run = max;
        }
 
+       next_run = gc_work->next_gc_run;
        gc_work->last_bucket = i;
        queue_delayed_work(system_long_wq, &gc_work->dwork, next_run);
 }
@@ -1025,7 +1023,7 @@ static void gc_worker(struct work_struct *work)
 static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
 {
        INIT_DELAYED_WORK(&gc_work->dwork, gc_worker);
-       gc_work->next_gc_run = GC_INTERVAL_MAX;
+       gc_work->next_gc_run = HZ;
        gc_work->exiting = false;
 }
 
@@ -1917,7 +1915,7 @@ int nf_conntrack_init_start(void)
        nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
 
        conntrack_gc_work_init(&conntrack_gc_work);
-       queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, GC_INTERVAL_MAX);
+       queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, HZ);
 
        return 0;
 
index 3dca90dc24ad392a6be6076e4ddbc345a1959ac0..ffb9e8ada899b770293744ed0da5bebba4b2166e 100644 (file)
@@ -13,7 +13,6 @@
 /* Internal logging interface, which relies on the real
    LOG target modules */
 
-#define NF_LOG_PREFIXLEN               128
 #define NFLOGGER_NAME_LEN              64
 
 static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly;
index 0db5f9782265ebb033f10d07da815495e8a7d278..1b913760f205be79e1809c983cb3140c284a00cc 100644 (file)
@@ -928,7 +928,8 @@ static struct nft_chain *nf_tables_chain_lookup(const struct nft_table *table,
 }
 
 static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = {
-       [NFTA_CHAIN_TABLE]      = { .type = NLA_STRING },
+       [NFTA_CHAIN_TABLE]      = { .type = NLA_STRING,
+                                   .len = NFT_TABLE_MAXNAMELEN - 1 },
        [NFTA_CHAIN_HANDLE]     = { .type = NLA_U64 },
        [NFTA_CHAIN_NAME]       = { .type = NLA_STRING,
                                    .len = NFT_CHAIN_MAXNAMELEN - 1 },
@@ -1854,7 +1855,8 @@ static struct nft_rule *nf_tables_rule_lookup(const struct nft_chain *chain,
 }
 
 static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = {
-       [NFTA_RULE_TABLE]       = { .type = NLA_STRING },
+       [NFTA_RULE_TABLE]       = { .type = NLA_STRING,
+                                   .len = NFT_TABLE_MAXNAMELEN - 1 },
        [NFTA_RULE_CHAIN]       = { .type = NLA_STRING,
                                    .len = NFT_CHAIN_MAXNAMELEN - 1 },
        [NFTA_RULE_HANDLE]      = { .type = NLA_U64 },
@@ -2443,7 +2445,8 @@ nft_select_set_ops(const struct nlattr * const nla[],
 }
 
 static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
-       [NFTA_SET_TABLE]                = { .type = NLA_STRING },
+       [NFTA_SET_TABLE]                = { .type = NLA_STRING,
+                                           .len = NFT_TABLE_MAXNAMELEN - 1 },
        [NFTA_SET_NAME]                 = { .type = NLA_STRING,
                                            .len = NFT_SET_MAXNAMELEN - 1 },
        [NFTA_SET_FLAGS]                = { .type = NLA_U32 },
@@ -3084,9 +3087,9 @@ static int nf_tables_delset(struct net *net, struct sock *nlsk,
 }
 
 static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
-                                       const struct nft_set *set,
+                                       struct nft_set *set,
                                        const struct nft_set_iter *iter,
-                                       const struct nft_set_elem *elem)
+                                       struct nft_set_elem *elem)
 {
        const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
        enum nft_registers dreg;
@@ -3192,8 +3195,10 @@ static const struct nla_policy nft_set_elem_policy[NFTA_SET_ELEM_MAX + 1] = {
 };
 
 static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = {
-       [NFTA_SET_ELEM_LIST_TABLE]      = { .type = NLA_STRING },
-       [NFTA_SET_ELEM_LIST_SET]        = { .type = NLA_STRING },
+       [NFTA_SET_ELEM_LIST_TABLE]      = { .type = NLA_STRING,
+                                           .len = NFT_TABLE_MAXNAMELEN - 1 },
+       [NFTA_SET_ELEM_LIST_SET]        = { .type = NLA_STRING,
+                                           .len = NFT_SET_MAXNAMELEN - 1 },
        [NFTA_SET_ELEM_LIST_ELEMENTS]   = { .type = NLA_NESTED },
        [NFTA_SET_ELEM_LIST_SET_ID]     = { .type = NLA_U32 },
 };
@@ -3303,9 +3308,9 @@ struct nft_set_dump_args {
 };
 
 static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
-                                 const struct nft_set *set,
+                                 struct nft_set *set,
                                  const struct nft_set_iter *iter,
-                                 const struct nft_set_elem *elem)
+                                 struct nft_set_elem *elem)
 {
        struct nft_set_dump_args *args;
 
@@ -3317,7 +3322,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct net *net = sock_net(skb->sk);
        u8 genmask = nft_genmask_cur(net);
-       const struct nft_set *set;
+       struct nft_set *set;
        struct nft_set_dump_args args;
        struct nft_ctx ctx;
        struct nlattr *nla[NFTA_SET_ELEM_LIST_MAX + 1];
@@ -3740,10 +3745,18 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                goto err5;
        }
 
+       if (set->size &&
+           !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact)) {
+               err = -ENFILE;
+               goto err6;
+       }
+
        nft_trans_elem(trans) = elem;
        list_add_tail(&trans->list, &ctx->net->nft.commit_list);
        return 0;
 
+err6:
+       set->ops->remove(set, &elem);
 err5:
        kfree(trans);
 err4:
@@ -3790,15 +3803,9 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
                return -EBUSY;
 
        nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
-               if (set->size &&
-                   !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact))
-                       return -ENFILE;
-
                err = nft_add_set_elem(&ctx, set, attr, nlh->nlmsg_flags);
-               if (err < 0) {
-                       atomic_dec(&set->nelems);
+               if (err < 0)
                        break;
-               }
        }
        return err;
 }
@@ -3883,9 +3890,9 @@ err1:
 }
 
 static int nft_flush_set(const struct nft_ctx *ctx,
-                        const struct nft_set *set,
+                        struct nft_set *set,
                         const struct nft_set_iter *iter,
-                        const struct nft_set_elem *elem)
+                        struct nft_set_elem *elem)
 {
        struct nft_trans *trans;
        int err;
@@ -3899,9 +3906,10 @@ static int nft_flush_set(const struct nft_ctx *ctx,
                err = -ENOENT;
                goto err1;
        }
+       set->ndeact++;
 
-       nft_trans_elem_set(trans) = (struct nft_set *)set;
-       nft_trans_elem(trans) = *((struct nft_set_elem *)elem);
+       nft_trans_elem_set(trans) = set;
+       nft_trans_elem(trans) = *elem;
        list_add_tail(&trans->list, &ctx->net->nft.commit_list);
 
        return 0;
@@ -4032,8 +4040,10 @@ struct nft_object *nf_tables_obj_lookup(const struct nft_table *table,
 EXPORT_SYMBOL_GPL(nf_tables_obj_lookup);
 
 static const struct nla_policy nft_obj_policy[NFTA_OBJ_MAX + 1] = {
-       [NFTA_OBJ_TABLE]        = { .type = NLA_STRING },
-       [NFTA_OBJ_NAME]         = { .type = NLA_STRING },
+       [NFTA_OBJ_TABLE]        = { .type = NLA_STRING,
+                                   .len = NFT_TABLE_MAXNAMELEN - 1 },
+       [NFTA_OBJ_NAME]         = { .type = NLA_STRING,
+                                   .len = NFT_OBJ_MAXNAMELEN - 1 },
        [NFTA_OBJ_TYPE]         = { .type = NLA_U32 },
        [NFTA_OBJ_DATA]         = { .type = NLA_NESTED },
 };
@@ -4262,10 +4272,11 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
                                if (idx > s_idx)
                                        memset(&cb->args[1], 0,
                                               sizeof(cb->args) - sizeof(cb->args[0]));
-                               if (filter->table[0] &&
+                               if (filter && filter->table[0] &&
                                    strcmp(filter->table, table->name))
                                        goto cont;
-                               if (filter->type != NFT_OBJECT_UNSPEC &&
+                               if (filter &&
+                                   filter->type != NFT_OBJECT_UNSPEC &&
                                    obj->type->type != filter->type)
                                        goto cont;
 
@@ -5009,9 +5020,9 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
                                 const struct nft_chain *chain);
 
 static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx,
-                                       const struct nft_set *set,
+                                       struct nft_set *set,
                                        const struct nft_set_iter *iter,
-                                       const struct nft_set_elem *elem)
+                                       struct nft_set_elem *elem)
 {
        const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
        const struct nft_data *data;
@@ -5035,7 +5046,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
 {
        const struct nft_rule *rule;
        const struct nft_expr *expr, *last;
-       const struct nft_set *set;
+       struct nft_set *set;
        struct nft_set_binding *binding;
        struct nft_set_iter iter;
 
index 7de2f46734a428d0938fef91aa914865c62d680e..049ad2d9ee66959367a051903563dca6ba654edb 100644 (file)
@@ -98,7 +98,8 @@ out:
 }
 
 static const struct nla_policy nft_dynset_policy[NFTA_DYNSET_MAX + 1] = {
-       [NFTA_DYNSET_SET_NAME]  = { .type = NLA_STRING },
+       [NFTA_DYNSET_SET_NAME]  = { .type = NLA_STRING,
+                                   .len = NFT_SET_MAXNAMELEN - 1 },
        [NFTA_DYNSET_SET_ID]    = { .type = NLA_U32 },
        [NFTA_DYNSET_OP]        = { .type = NLA_U32 },
        [NFTA_DYNSET_SREG_KEY]  = { .type = NLA_U32 },
index 6271e40a3dd6d00b0a19f31a5ef5509185120505..6f6e64423643a8c2991f2dedab65aff9e8c92720 100644 (file)
@@ -39,7 +39,8 @@ static void nft_log_eval(const struct nft_expr *expr,
 
 static const struct nla_policy nft_log_policy[NFTA_LOG_MAX + 1] = {
        [NFTA_LOG_GROUP]        = { .type = NLA_U16 },
-       [NFTA_LOG_PREFIX]       = { .type = NLA_STRING },
+       [NFTA_LOG_PREFIX]       = { .type = NLA_STRING,
+                                   .len = NF_LOG_PREFIXLEN - 1 },
        [NFTA_LOG_SNAPLEN]      = { .type = NLA_U32 },
        [NFTA_LOG_QTHRESHOLD]   = { .type = NLA_U16 },
        [NFTA_LOG_LEVEL]        = { .type = NLA_U32 },
index d4f97fa7e21d0036690e229768ab097fc5220cfc..e21aea7e5ec8f141ea3155d1da3c491484c00a73 100644 (file)
@@ -49,7 +49,8 @@ static void nft_lookup_eval(const struct nft_expr *expr,
 }
 
 static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = {
-       [NFTA_LOOKUP_SET]       = { .type = NLA_STRING },
+       [NFTA_LOOKUP_SET]       = { .type = NLA_STRING,
+                                   .len = NFT_SET_MAXNAMELEN - 1 },
        [NFTA_LOOKUP_SET_ID]    = { .type = NLA_U32 },
        [NFTA_LOOKUP_SREG]      = { .type = NLA_U32 },
        [NFTA_LOOKUP_DREG]      = { .type = NLA_U32 },
index 415a65ba2b85eb93a6866fe42909aa6fcfd74cb3..1ae8c49ca4a1fac06f69c41f68a36b7e85593adb 100644 (file)
@@ -193,10 +193,12 @@ nft_objref_select_ops(const struct nft_ctx *ctx,
 }
 
 static const struct nla_policy nft_objref_policy[NFTA_OBJREF_MAX + 1] = {
-       [NFTA_OBJREF_IMM_NAME]  = { .type = NLA_STRING },
+       [NFTA_OBJREF_IMM_NAME]  = { .type = NLA_STRING,
+                                   .len = NFT_OBJ_MAXNAMELEN - 1 },
        [NFTA_OBJREF_IMM_TYPE]  = { .type = NLA_U32 },
        [NFTA_OBJREF_SET_SREG]  = { .type = NLA_U32 },
-       [NFTA_OBJREF_SET_NAME]  = { .type = NLA_STRING },
+       [NFTA_OBJREF_SET_NAME]  = { .type = NLA_STRING,
+                                   .len = NFT_SET_MAXNAMELEN - 1 },
        [NFTA_OBJREF_SET_ID]    = { .type = NLA_U32 },
 };
 
index 1e20e2bbb6d924b5cdb331acf8610c8719763c7c..e36069fb76aebd4140098f38a3758135e78b8d43 100644 (file)
@@ -212,7 +212,7 @@ static void nft_hash_remove(const struct nft_set *set,
        rhashtable_remove_fast(&priv->ht, &he->node, nft_hash_params);
 }
 
-static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
+static void nft_hash_walk(const struct nft_ctx *ctx, struct nft_set *set,
                          struct nft_set_iter *iter)
 {
        struct nft_hash *priv = nft_set_priv(set);
index 08376e50f6cdc26d7864e846b5a07f2ca662207c..f06f55ee516de44d3b0367cc52afb32173a2d0e8 100644 (file)
@@ -221,7 +221,7 @@ static void *nft_rbtree_deactivate(const struct net *net,
 }
 
 static void nft_rbtree_walk(const struct nft_ctx *ctx,
-                           const struct nft_set *set,
+                           struct nft_set *set,
                            struct nft_set_iter *iter)
 {
        const struct nft_rbtree *priv = nft_set_priv(set);
index b9e1a13b4ba36a0bc7edf6a8c2c116c7d48c970c..70f5b6a4683c29351284e3385f56caf1bce37ac7 100644 (file)
@@ -1497,6 +1497,8 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po)
        f->arr[f->num_members] = sk;
        smp_wmb();
        f->num_members++;
+       if (f->num_members == 1)
+               dev_add_pack(&f->prot_hook);
        spin_unlock(&f->lock);
 }
 
@@ -1513,6 +1515,8 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
        BUG_ON(i >= f->num_members);
        f->arr[i] = f->arr[f->num_members - 1];
        f->num_members--;
+       if (f->num_members == 0)
+               __dev_remove_pack(&f->prot_hook);
        spin_unlock(&f->lock);
 }
 
@@ -1619,6 +1623,7 @@ static void fanout_release_data(struct packet_fanout *f)
 
 static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
 {
+       struct packet_rollover *rollover = NULL;
        struct packet_sock *po = pkt_sk(sk);
        struct packet_fanout *f, *match;
        u8 type = type_flags & 0xff;
@@ -1641,23 +1646,28 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
                return -EINVAL;
        }
 
+       mutex_lock(&fanout_mutex);
+
+       err = -EINVAL;
        if (!po->running)
-               return -EINVAL;
+               goto out;
 
+       err = -EALREADY;
        if (po->fanout)
-               return -EALREADY;
+               goto out;
 
        if (type == PACKET_FANOUT_ROLLOVER ||
            (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
-               po->rollover = kzalloc(sizeof(*po->rollover), GFP_KERNEL);
-               if (!po->rollover)
-                       return -ENOMEM;
-               atomic_long_set(&po->rollover->num, 0);
-               atomic_long_set(&po->rollover->num_huge, 0);
-               atomic_long_set(&po->rollover->num_failed, 0);
+               err = -ENOMEM;
+               rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
+               if (!rollover)
+                       goto out;
+               atomic_long_set(&rollover->num, 0);
+               atomic_long_set(&rollover->num_huge, 0);
+               atomic_long_set(&rollover->num_failed, 0);
+               po->rollover = rollover;
        }
 
-       mutex_lock(&fanout_mutex);
        match = NULL;
        list_for_each_entry(f, &fanout_list, list) {
                if (f->id == id &&
@@ -1687,7 +1697,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
                match->prot_hook.func = packet_rcv_fanout;
                match->prot_hook.af_packet_priv = match;
                match->prot_hook.id_match = match_fanout_group;
-               dev_add_pack(&match->prot_hook);
                list_add(&match->list, &fanout_list);
        }
        err = -EINVAL;
@@ -1704,36 +1713,40 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
                }
        }
 out:
-       mutex_unlock(&fanout_mutex);
-       if (err) {
-               kfree(po->rollover);
+       if (err && rollover) {
+               kfree(rollover);
                po->rollover = NULL;
        }
+       mutex_unlock(&fanout_mutex);
        return err;
 }
 
-static void fanout_release(struct sock *sk)
+/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
+ * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
+ * It is the responsibility of the caller to call fanout_release_data() and
+ * free the returned packet_fanout (after synchronize_net())
+ */
+static struct packet_fanout *fanout_release(struct sock *sk)
 {
        struct packet_sock *po = pkt_sk(sk);
        struct packet_fanout *f;
 
+       mutex_lock(&fanout_mutex);
        f = po->fanout;
-       if (!f)
-               return;
+       if (f) {
+               po->fanout = NULL;
 
-       mutex_lock(&fanout_mutex);
-       po->fanout = NULL;
+               if (atomic_dec_and_test(&f->sk_ref))
+                       list_del(&f->list);
+               else
+                       f = NULL;
 
-       if (atomic_dec_and_test(&f->sk_ref)) {
-               list_del(&f->list);
-               dev_remove_pack(&f->prot_hook);
-               fanout_release_data(f);
-               kfree(f);
+               if (po->rollover)
+                       kfree_rcu(po->rollover, rcu);
        }
        mutex_unlock(&fanout_mutex);
 
-       if (po->rollover)
-               kfree_rcu(po->rollover, rcu);
+       return f;
 }
 
 static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
@@ -1976,7 +1989,7 @@ static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
                return -EINVAL;
        *len -= sizeof(vnet_hdr);
 
-       if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le()))
+       if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true))
                return -EINVAL;
 
        return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
@@ -2237,7 +2250,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
        if (po->has_vnet_hdr) {
                if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
                                            sizeof(struct virtio_net_hdr),
-                                           vio_le())) {
+                                           vio_le(), true)) {
                        spin_lock(&sk->sk_receive_queue.lock);
                        goto drop_n_account;
                }
@@ -2755,7 +2768,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        struct virtio_net_hdr vnet_hdr = { 0 };
        int offset = 0;
        struct packet_sock *po = pkt_sk(sk);
-       int hlen, tlen;
+       int hlen, tlen, linear;
        int extra_len = 0;
 
        /*
@@ -2816,8 +2829,9 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        err = -ENOBUFS;
        hlen = LL_RESERVED_SPACE(dev);
        tlen = dev->needed_tailroom;
-       skb = packet_alloc_skb(sk, hlen + tlen, hlen, len,
-                              __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len),
+       linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
+       linear = max(linear, min_t(int, len, dev->hard_header_len));
+       skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
                               msg->msg_flags & MSG_DONTWAIT, &err);
        if (skb == NULL)
                goto out_unlock;
@@ -2906,6 +2920,7 @@ static int packet_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
        struct packet_sock *po;
+       struct packet_fanout *f;
        struct net *net;
        union tpacket_req_u req_u;
 
@@ -2945,9 +2960,14 @@ static int packet_release(struct socket *sock)
                packet_set_ring(sk, &req_u, 1, 1);
        }
 
-       fanout_release(sk);
+       f = fanout_release(sk);
 
        synchronize_net();
+
+       if (f) {
+               fanout_release_data(f);
+               kfree(f);
+       }
        /*
         *      Now the socket is dead. No more input will appear.
         */
@@ -3899,7 +3919,6 @@ static int packet_notifier(struct notifier_block *this,
                                }
                                if (msg == NETDEV_UNREGISTER) {
                                        packet_cached_dev_reset(po);
-                                       fanout_release(sk);
                                        po->ifindex = -1;
                                        if (po->prot_hook.dev)
                                                dev_put(po->prot_hook.dev);
index 970db7a41684aa2a494b97663f91ca932308de05..5752789acc135250c312199c2d6e5e15d05fdea0 100644 (file)
@@ -568,9 +568,9 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
                               &mask->icmp.type,
                               TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
                               sizeof(key->icmp.type));
-               fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
+               fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
                               &mask->icmp.code,
-                              TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
+                              TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
                               sizeof(key->icmp.code));
        }
 
index f935429bd5ef1fcbe6a4272876b76e2ebb574c4b..b12bc2abea931a7defd1e23eb86a20fe09e76388 100644 (file)
 #include <net/sch_generic.h>
 #include <net/pkt_cls.h>
 
-struct cls_mall_filter {
+struct cls_mall_head {
        struct tcf_exts exts;
        struct tcf_result res;
        u32 handle;
-       struct rcu_head rcu;
        u32 flags;
-};
-
-struct cls_mall_head {
-       struct cls_mall_filter *filter;
        struct rcu_head rcu;
 };
 
@@ -33,38 +28,29 @@ static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                         struct tcf_result *res)
 {
        struct cls_mall_head *head = rcu_dereference_bh(tp->root);
-       struct cls_mall_filter *f = head->filter;
 
-       if (tc_skip_sw(f->flags))
+       if (tc_skip_sw(head->flags))
                return -1;
 
-       return tcf_exts_exec(skb, &f->exts, res);
+       return tcf_exts_exec(skb, &head->exts, res);
 }
 
 static int mall_init(struct tcf_proto *tp)
 {
-       struct cls_mall_head *head;
-
-       head = kzalloc(sizeof(*head), GFP_KERNEL);
-       if (!head)
-               return -ENOBUFS;
-
-       rcu_assign_pointer(tp->root, head);
-
        return 0;
 }
 
-static void mall_destroy_filter(struct rcu_head *head)
+static void mall_destroy_rcu(struct rcu_head *rcu)
 {
-       struct cls_mall_filter *f = container_of(head, struct cls_mall_filter, rcu);
+       struct cls_mall_head *head = container_of(rcu, struct cls_mall_head,
+                                                 rcu);
 
-       tcf_exts_destroy(&f->exts);
-
-       kfree(f);
+       tcf_exts_destroy(&head->exts);
+       kfree(head);
 }
 
 static int mall_replace_hw_filter(struct tcf_proto *tp,
-                                 struct cls_mall_filter *f,
+                                 struct cls_mall_head *head,
                                  unsigned long cookie)
 {
        struct net_device *dev = tp->q->dev_queue->dev;
@@ -74,7 +60,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
        offload.type = TC_SETUP_MATCHALL;
        offload.cls_mall = &mall_offload;
        offload.cls_mall->command = TC_CLSMATCHALL_REPLACE;
-       offload.cls_mall->exts = &f->exts;
+       offload.cls_mall->exts = &head->exts;
        offload.cls_mall->cookie = cookie;
 
        return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
@@ -82,7 +68,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
 }
 
 static void mall_destroy_hw_filter(struct tcf_proto *tp,
-                                  struct cls_mall_filter *f,
+                                  struct cls_mall_head *head,
                                   unsigned long cookie)
 {
        struct net_device *dev = tp->q->dev_queue->dev;
@@ -103,29 +89,20 @@ static bool mall_destroy(struct tcf_proto *tp, bool force)
 {
        struct cls_mall_head *head = rtnl_dereference(tp->root);
        struct net_device *dev = tp->q->dev_queue->dev;
-       struct cls_mall_filter *f = head->filter;
 
-       if (!force && f)
-               return false;
+       if (!head)
+               return true;
 
-       if (f) {
-               if (tc_should_offload(dev, tp, f->flags))
-                       mall_destroy_hw_filter(tp, f, (unsigned long) f);
+       if (tc_should_offload(dev, tp, head->flags))
+               mall_destroy_hw_filter(tp, head, (unsigned long) head);
 
-               call_rcu(&f->rcu, mall_destroy_filter);
-       }
-       kfree_rcu(head, rcu);
+       call_rcu(&head->rcu, mall_destroy_rcu);
        return true;
 }
 
 static unsigned long mall_get(struct tcf_proto *tp, u32 handle)
 {
-       struct cls_mall_head *head = rtnl_dereference(tp->root);
-       struct cls_mall_filter *f = head->filter;
-
-       if (f && f->handle == handle)
-               return (unsigned long) f;
-       return 0;
+       return 0UL;
 }
 
 static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
@@ -134,7 +111,7 @@ static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
 };
 
 static int mall_set_parms(struct net *net, struct tcf_proto *tp,
-                         struct cls_mall_filter *f,
+                         struct cls_mall_head *head,
                          unsigned long base, struct nlattr **tb,
                          struct nlattr *est, bool ovr)
 {
@@ -147,11 +124,11 @@ static int mall_set_parms(struct net *net, struct tcf_proto *tp,
                return err;
 
        if (tb[TCA_MATCHALL_CLASSID]) {
-               f->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
-               tcf_bind_filter(tp, &f->res, base);
+               head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
+               tcf_bind_filter(tp, &head->res, base);
        }
 
-       tcf_exts_change(tp, &f->exts, &e);
+       tcf_exts_change(tp, &head->exts, &e);
 
        return 0;
 }
@@ -162,21 +139,17 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
                       unsigned long *arg, bool ovr)
 {
        struct cls_mall_head *head = rtnl_dereference(tp->root);
-       struct cls_mall_filter *fold = (struct cls_mall_filter *) *arg;
        struct net_device *dev = tp->q->dev_queue->dev;
-       struct cls_mall_filter *f;
        struct nlattr *tb[TCA_MATCHALL_MAX + 1];
+       struct cls_mall_head *new;
        u32 flags = 0;
        int err;
 
        if (!tca[TCA_OPTIONS])
                return -EINVAL;
 
-       if (head->filter)
-               return -EBUSY;
-
-       if (fold)
-               return -EINVAL;
+       if (head)
+               return -EEXIST;
 
        err = nla_parse_nested(tb, TCA_MATCHALL_MAX,
                               tca[TCA_OPTIONS], mall_policy);
@@ -189,23 +162,23 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
                        return -EINVAL;
        }
 
-       f = kzalloc(sizeof(*f), GFP_KERNEL);
-       if (!f)
+       new = kzalloc(sizeof(*new), GFP_KERNEL);
+       if (!new)
                return -ENOBUFS;
 
-       tcf_exts_init(&f->exts, TCA_MATCHALL_ACT, 0);
+       tcf_exts_init(&new->exts, TCA_MATCHALL_ACT, 0);
 
        if (!handle)
                handle = 1;
-       f->handle = handle;
-       f->flags = flags;
+       new->handle = handle;
+       new->flags = flags;
 
-       err = mall_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr);
+       err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr);
        if (err)
                goto errout;
 
        if (tc_should_offload(dev, tp, flags)) {
-               err = mall_replace_hw_filter(tp, f, (unsigned long) f);
+               err = mall_replace_hw_filter(tp, new, (unsigned long) new);
                if (err) {
                        if (tc_skip_sw(flags))
                                goto errout;
@@ -214,39 +187,29 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
                }
        }
 
-       *arg = (unsigned long) f;
-       rcu_assign_pointer(head->filter, f);
-
+       *arg = (unsigned long) head;
+       rcu_assign_pointer(tp->root, new);
+       if (head)
+               call_rcu(&head->rcu, mall_destroy_rcu);
        return 0;
 
 errout:
-       kfree(f);
+       kfree(new);
        return err;
 }
 
 static int mall_delete(struct tcf_proto *tp, unsigned long arg)
 {
-       struct cls_mall_head *head = rtnl_dereference(tp->root);
-       struct cls_mall_filter *f = (struct cls_mall_filter *) arg;
-       struct net_device *dev = tp->q->dev_queue->dev;
-
-       if (tc_should_offload(dev, tp, f->flags))
-               mall_destroy_hw_filter(tp, f, (unsigned long) f);
-
-       RCU_INIT_POINTER(head->filter, NULL);
-       tcf_unbind_filter(tp, &f->res);
-       call_rcu(&f->rcu, mall_destroy_filter);
-       return 0;
+       return -EOPNOTSUPP;
 }
 
 static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 {
        struct cls_mall_head *head = rtnl_dereference(tp->root);
-       struct cls_mall_filter *f = head->filter;
 
        if (arg->count < arg->skip)
                goto skip;
-       if (arg->fn(tp, (unsigned long) f, arg) < 0)
+       if (arg->fn(tp, (unsigned long) head, arg) < 0)
                arg->stop = 1;
 skip:
        arg->count++;
@@ -255,28 +218,28 @@ skip:
 static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
                     struct sk_buff *skb, struct tcmsg *t)
 {
-       struct cls_mall_filter *f = (struct cls_mall_filter *) fh;
+       struct cls_mall_head *head = (struct cls_mall_head *) fh;
        struct nlattr *nest;
 
-       if (!f)
+       if (!head)
                return skb->len;
 
-       t->tcm_handle = f->handle;
+       t->tcm_handle = head->handle;
 
        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (!nest)
                goto nla_put_failure;
 
-       if (f->res.classid &&
-           nla_put_u32(skb, TCA_MATCHALL_CLASSID, f->res.classid))
+       if (head->res.classid &&
+           nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
                goto nla_put_failure;
 
-       if (tcf_exts_dump(skb, &f->exts))
+       if (tcf_exts_dump(skb, &head->exts))
                goto nla_put_failure;
 
        nla_nest_end(skb, nest);
 
-       if (tcf_exts_dump_stats(skb, &f->exts) < 0)
+       if (tcf_exts_dump_stats(skb, &head->exts) < 0)
                goto nla_put_failure;
 
        return skb->len;
index 5ed8e79bf102e1c2a9ef1955aee6dba01869545e..64dfd35ccdcccbf35b2f6273565f2dbf89f941c5 100644 (file)
@@ -222,7 +222,8 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
        SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
 
        rcu_read_lock();
-       res = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt), np->tclass);
+       res = ip6_xmit(sk, skb, fl6, sk->sk_mark, rcu_dereference(np->opt),
+                      np->tclass);
        rcu_read_unlock();
        return res;
 }
index 7e869d0cca69826ee3e892e389bacdc9a58a1637..4f5a2b580aa52deb75e00c92d8b60992cf5bdaa6 100644 (file)
@@ -68,7 +68,7 @@ static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
                goto out;
        }
 
-       segs = skb_segment(skb, features | NETIF_F_HW_CSUM);
+       segs = skb_segment(skb, features | NETIF_F_HW_CSUM | NETIF_F_SG);
        if (IS_ERR(segs))
                goto out;
 
index 318c6786d6539a301ac7b76d82a49a1af3818d10..1b5d669e30292a57ed57dd920d81be2a57f97b22 100644 (file)
@@ -235,8 +235,12 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
                                              sctp_assoc_t id)
 {
        struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
-       struct sctp_transport *transport;
+       struct sctp_af *af = sctp_get_af_specific(addr->ss_family);
        union sctp_addr *laddr = (union sctp_addr *)addr;
+       struct sctp_transport *transport;
+
+       if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len))
+               return NULL;
 
        addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
                                               laddr,
@@ -7422,7 +7426,8 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
                 */
                release_sock(sk);
                current_timeo = schedule_timeout(current_timeo);
-               BUG_ON(sk != asoc->base.sk);
+               if (sk != asoc->base.sk)
+                       goto do_error;
                lock_sock(sk);
 
                *timeo_p = current_timeo;
index dc6fb79a361f1ca3ab9869fc02ba05c1a533ad9b..25d9a9cf7b66b7f4e501d38d91f6a1908830972e 100644 (file)
@@ -260,7 +260,7 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
        if (!oa->data)
                return -ENOMEM;
 
-       creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL);
+       creds = kzalloc(sizeof(struct svc_cred), GFP_KERNEL);
        if (!creds) {
                kfree(oa->data);
                return -ENOMEM;
index 8147e8d56eb214667a9d27a946bacde32637653d..f39e3e11f9aa283698ced6a8ca92fed5f68140e5 100644 (file)
@@ -1358,7 +1358,7 @@ static int c_show(struct seq_file *m, void *p)
        ifdebug(CACHE)
                seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
                           convert_to_wallclock(cp->expiry_time),
-                          atomic_read(&cp->ref.refcount), cp->flags);
+                          kref_read(&cp->ref), cp->flags);
        cache_get(cp);
        if (cache_check(cd, cp, NULL))
                /* cache_check does a cache_put on failure */
index 1efbe48e794f804b24f30e566994b873a85727bb..1dc9f3bac0997384a9b3daab33d8ceeda3d726d9 100644 (file)
@@ -336,6 +336,11 @@ out:
 
 static DEFINE_IDA(rpc_clids);
 
+void rpc_cleanup_clids(void)
+{
+       ida_destroy(&rpc_clids);
+}
+
 static int rpc_alloc_clid(struct rpc_clnt *clnt)
 {
        int clid;
index d1c330a7953a0bc9bb4e08162baf77740504b80c..c73de181467a307346872629ac5c62ce1cde3d72 100644 (file)
@@ -119,6 +119,7 @@ out:
 static void __exit
 cleanup_sunrpc(void)
 {
+       rpc_cleanup_clids();
        rpcauth_remove_module();
        cleanup_socket_xprt();
        svc_cleanup_xprt_sock();
index 9c9db55a0c1e1735e522e406797c4efb25930b07..7bfe1fb42addcd41b00d297b85e2009d711b6f15 100644 (file)
@@ -490,7 +490,7 @@ static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
                svc_xprt_get(xprt);
 
                dprintk("svc: transport %p dequeued, inuse=%d\n",
-                       xprt, atomic_read(&xprt->xpt_ref.refcount));
+                       xprt, kref_read(&xprt->xpt_ref));
        }
        spin_unlock_bh(&pool->sp_lock);
 out:
@@ -822,7 +822,7 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
                /* XPT_DATA|XPT_DEFERRED case: */
                dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
                        rqstp, rqstp->rq_pool->sp_id, xprt,
-                       atomic_read(&xprt->xpt_ref.refcount));
+                       kref_read(&xprt->xpt_ref));
                rqstp->rq_deferred = svc_deferred_dequeue(xprt);
                if (rqstp->rq_deferred)
                        len = svc_deferred_recv(rqstp);
@@ -980,7 +980,7 @@ static void svc_age_temp_xprts(unsigned long closure)
                 * through, close it. */
                if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
                        continue;
-               if (atomic_read(&xprt->xpt_ref.refcount) > 1 ||
+               if (kref_read(&xprt->xpt_ref) > 1 ||
                    test_bit(XPT_BUSY, &xprt->xpt_flags))
                        continue;
                list_del_init(le);
index e112da8005b5c8f8f70a94124705b6c8c941c555..bb8db3cb8032ee0a4714cf3b49aeb83cb73037bd 100644 (file)
@@ -126,13 +126,18 @@ EXPORT_SYMBOL_GPL(svc_auth_unregister);
 static struct hlist_head       auth_domain_table[DN_HASHMAX];
 static DEFINE_SPINLOCK(auth_domain_lock);
 
+static void auth_domain_release(struct kref *kref)
+{
+       struct auth_domain *dom = container_of(kref, struct auth_domain, ref);
+
+       hlist_del(&dom->hash);
+       dom->flavour->domain_release(dom);
+       spin_unlock(&auth_domain_lock);
+}
+
 void auth_domain_put(struct auth_domain *dom)
 {
-       if (atomic_dec_and_lock(&dom->ref.refcount, &auth_domain_lock)) {
-               hlist_del(&dom->hash);
-               dom->flavour->domain_release(dom);
-               spin_unlock(&auth_domain_lock);
-       }
+       kref_put_lock(&dom->ref, auth_domain_release, &auth_domain_lock);
 }
 EXPORT_SYMBOL_GPL(auth_domain_put);
 
index ca2799af05a6846980e2dfa2bd7c58c655a6f069..39652d390a9c60bc026199a7dcb5ef996bcd65ab 100644 (file)
@@ -1201,9 +1201,9 @@ static void __svc_rdma_free(struct work_struct *work)
                ib_drain_qp(rdma->sc_qp);
 
        /* We should only be called from kref_put */
-       if (atomic_read(&xprt->xpt_ref.refcount) != 0)
+       if (kref_read(&xprt->xpt_ref) != 0)
                pr_err("svcrdma: sc_xprt still in use? (%d)\n",
-                      atomic_read(&xprt->xpt_ref.refcount));
+                      kref_read(&xprt->xpt_ref));
 
        /*
         * Destroy queued, but not processed read completions. Note
index 9d2f4c2b08abc56ecb627ff067ad359c54e735fd..27753325e06e4355d71270d8c5c8818bef3e94c5 100644 (file)
@@ -263,6 +263,11 @@ static void tipc_node_write_lock(struct tipc_node *n)
        write_lock_bh(&n->lock);
 }
 
+static void tipc_node_write_unlock_fast(struct tipc_node *n)
+{
+       write_unlock_bh(&n->lock);
+}
+
 static void tipc_node_write_unlock(struct tipc_node *n)
 {
        struct net *net = n->net;
@@ -417,7 +422,7 @@ void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
        }
        tipc_node_write_lock(n);
        list_add_tail(subscr, &n->publ_list);
-       tipc_node_write_unlock(n);
+       tipc_node_write_unlock_fast(n);
        tipc_node_put(n);
 }
 
@@ -435,7 +440,7 @@ void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
        }
        tipc_node_write_lock(n);
        list_del_init(subscr);
-       tipc_node_write_unlock(n);
+       tipc_node_write_unlock_fast(n);
        tipc_node_put(n);
 }
 
index 215849ce453dfbd70bccceacc4d42b1a2a6d37d8..3cd6402e812cb05c5fc0872941a8a6c683e367f3 100644 (file)
@@ -86,12 +86,12 @@ struct outqueue_entry {
 static void tipc_recv_work(struct work_struct *work);
 static void tipc_send_work(struct work_struct *work);
 static void tipc_clean_outqueues(struct tipc_conn *con);
-static void tipc_sock_release(struct tipc_conn *con);
 
 static void tipc_conn_kref_release(struct kref *kref)
 {
        struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
-       struct sockaddr_tipc *saddr = con->server->saddr;
+       struct tipc_server *s = con->server;
+       struct sockaddr_tipc *saddr = s->saddr;
        struct socket *sock = con->sock;
        struct sock *sk;
 
@@ -103,9 +103,13 @@ static void tipc_conn_kref_release(struct kref *kref)
                }
                saddr->scope = -TIPC_NODE_SCOPE;
                kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr));
-               tipc_sock_release(con);
                sock_release(sock);
                con->sock = NULL;
+
+               spin_lock_bh(&s->idr_lock);
+               idr_remove(&s->conn_idr, con->conid);
+               s->idr_in_use--;
+               spin_unlock_bh(&s->idr_lock);
        }
 
        tipc_clean_outqueues(con);
@@ -128,8 +132,10 @@ static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid)
 
        spin_lock_bh(&s->idr_lock);
        con = idr_find(&s->conn_idr, conid);
-       if (con)
+       if (con && test_bit(CF_CONNECTED, &con->flags))
                conn_get(con);
+       else
+               con = NULL;
        spin_unlock_bh(&s->idr_lock);
        return con;
 }
@@ -186,26 +192,15 @@ static void tipc_unregister_callbacks(struct tipc_conn *con)
        write_unlock_bh(&sk->sk_callback_lock);
 }
 
-static void tipc_sock_release(struct tipc_conn *con)
-{
-       struct tipc_server *s = con->server;
-
-       if (con->conid)
-               s->tipc_conn_release(con->conid, con->usr_data);
-
-       tipc_unregister_callbacks(con);
-}
-
 static void tipc_close_conn(struct tipc_conn *con)
 {
        struct tipc_server *s = con->server;
 
        if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
+               tipc_unregister_callbacks(con);
 
-               spin_lock_bh(&s->idr_lock);
-               idr_remove(&s->conn_idr, con->conid);
-               s->idr_in_use--;
-               spin_unlock_bh(&s->idr_lock);
+               if (con->conid)
+                       s->tipc_conn_release(con->conid, con->usr_data);
 
                /* We shouldn't flush pending works as we may be in the
                 * thread. In fact the races with pending rx/tx work structs
@@ -458,6 +453,11 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid,
        if (!con)
                return -EINVAL;
 
+       if (!test_bit(CF_CONNECTED, &con->flags)) {
+               conn_put(con);
+               return 0;
+       }
+
        e = tipc_alloc_entry(data, len);
        if (!e) {
                conn_put(con);
@@ -471,12 +471,8 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid,
        list_add_tail(&e->list, &con->outqueue);
        spin_unlock_bh(&con->outqueue_lock);
 
-       if (test_bit(CF_CONNECTED, &con->flags)) {
-               if (!queue_work(s->send_wq, &con->swork))
-                       conn_put(con);
-       } else {
+       if (!queue_work(s->send_wq, &con->swork))
                conn_put(con);
-       }
        return 0;
 }
 
@@ -500,7 +496,7 @@ static void tipc_send_to_sock(struct tipc_conn *con)
        int ret;
 
        spin_lock_bh(&con->outqueue_lock);
-       while (1) {
+       while (test_bit(CF_CONNECTED, &con->flags)) {
                e = list_entry(con->outqueue.next, struct outqueue_entry,
                               list);
                if ((struct list_head *) e == &con->outqueue)
@@ -623,14 +619,12 @@ int tipc_server_start(struct tipc_server *s)
 void tipc_server_stop(struct tipc_server *s)
 {
        struct tipc_conn *con;
-       int total = 0;
        int id;
 
        spin_lock_bh(&s->idr_lock);
-       for (id = 0; total < s->idr_in_use; id++) {
+       for (id = 0; s->idr_in_use; id++) {
                con = idr_find(&s->conn_idr, id);
                if (con) {
-                       total++;
                        spin_unlock_bh(&s->idr_lock);
                        tipc_close_conn(con);
                        spin_lock_bh(&s->idr_lock);
index 0dd02244e21d72b8e53e371d51eeae53e4b15a41..9d94e65d0894183b4af94ed24e84b94c0478b551 100644 (file)
@@ -54,6 +54,8 @@ struct tipc_subscriber {
 
 static void tipc_subscrp_delete(struct tipc_subscription *sub);
 static void tipc_subscrb_put(struct tipc_subscriber *subscriber);
+static void tipc_subscrp_put(struct tipc_subscription *subscription);
+static void tipc_subscrp_get(struct tipc_subscription *subscription);
 
 /**
  * htohl - convert value to endianness used by destination
@@ -123,6 +125,7 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
 {
        struct tipc_name_seq seq;
 
+       tipc_subscrp_get(sub);
        tipc_subscrp_convert_seq(&sub->evt.s.seq, sub->swap, &seq);
        if (!tipc_subscrp_check_overlap(&seq, found_lower, found_upper))
                return;
@@ -132,30 +135,23 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
 
        tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref,
                                node);
+       tipc_subscrp_put(sub);
 }
 
 static void tipc_subscrp_timeout(unsigned long data)
 {
        struct tipc_subscription *sub = (struct tipc_subscription *)data;
-       struct tipc_subscriber *subscriber = sub->subscriber;
 
        /* Notify subscriber of timeout */
        tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
                                TIPC_SUBSCR_TIMEOUT, 0, 0);
 
-       spin_lock_bh(&subscriber->lock);
-       tipc_subscrp_delete(sub);
-       spin_unlock_bh(&subscriber->lock);
-
-       tipc_subscrb_put(subscriber);
+       tipc_subscrp_put(sub);
 }
 
 static void tipc_subscrb_kref_release(struct kref *kref)
 {
-       struct tipc_subscriber *subcriber = container_of(kref,
-                                           struct tipc_subscriber, kref);
-
-       kfree(subcriber);
+       kfree(container_of(kref,struct tipc_subscriber, kref));
 }
 
 static void tipc_subscrb_put(struct tipc_subscriber *subscriber)
@@ -168,6 +164,59 @@ static void tipc_subscrb_get(struct tipc_subscriber *subscriber)
        kref_get(&subscriber->kref);
 }
 
+static void tipc_subscrp_kref_release(struct kref *kref)
+{
+       struct tipc_subscription *sub = container_of(kref,
+                                                    struct tipc_subscription,
+                                                    kref);
+       struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
+       struct tipc_subscriber *subscriber = sub->subscriber;
+
+       spin_lock_bh(&subscriber->lock);
+       tipc_nametbl_unsubscribe(sub);
+       list_del(&sub->subscrp_list);
+       atomic_dec(&tn->subscription_count);
+       spin_unlock_bh(&subscriber->lock);
+       kfree(sub);
+       tipc_subscrb_put(subscriber);
+}
+
+static void tipc_subscrp_put(struct tipc_subscription *subscription)
+{
+       kref_put(&subscription->kref, tipc_subscrp_kref_release);
+}
+
+static void tipc_subscrp_get(struct tipc_subscription *subscription)
+{
+       kref_get(&subscription->kref);
+}
+
+/* tipc_subscrb_subscrp_delete - delete a specific subscription or all
+ * subscriptions for a given subscriber.
+ */
+static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber,
+                                       struct tipc_subscr *s)
+{
+       struct list_head *subscription_list = &subscriber->subscrp_list;
+       struct tipc_subscription *sub, *temp;
+
+       spin_lock_bh(&subscriber->lock);
+       list_for_each_entry_safe(sub, temp, subscription_list,  subscrp_list) {
+               if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr)))
+                       continue;
+
+               tipc_subscrp_get(sub);
+               spin_unlock_bh(&subscriber->lock);
+               tipc_subscrp_delete(sub);
+               tipc_subscrp_put(sub);
+               spin_lock_bh(&subscriber->lock);
+
+               if (s)
+                       break;
+       }
+       spin_unlock_bh(&subscriber->lock);
+}
+
 static struct tipc_subscriber *tipc_subscrb_create(int conid)
 {
        struct tipc_subscriber *subscriber;
@@ -177,8 +226,8 @@ static struct tipc_subscriber *tipc_subscrb_create(int conid)
                pr_warn("Subscriber rejected, no memory\n");
                return NULL;
        }
-       kref_init(&subscriber->kref);
        INIT_LIST_HEAD(&subscriber->subscrp_list);
+       kref_init(&subscriber->kref);
        subscriber->conid = conid;
        spin_lock_init(&subscriber->lock);
 
@@ -187,55 +236,22 @@ static struct tipc_subscriber *tipc_subscrb_create(int conid)
 
 static void tipc_subscrb_delete(struct tipc_subscriber *subscriber)
 {
-       struct tipc_subscription *sub, *temp;
-       u32 timeout;
-
-       spin_lock_bh(&subscriber->lock);
-       /* Destroy any existing subscriptions for subscriber */
-       list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
-                                subscrp_list) {
-               timeout = htohl(sub->evt.s.timeout, sub->swap);
-               if ((timeout == TIPC_WAIT_FOREVER) || del_timer(&sub->timer)) {
-                       tipc_subscrp_delete(sub);
-                       tipc_subscrb_put(subscriber);
-               }
-       }
-       spin_unlock_bh(&subscriber->lock);
-
+       tipc_subscrb_subscrp_delete(subscriber, NULL);
        tipc_subscrb_put(subscriber);
 }
 
 static void tipc_subscrp_delete(struct tipc_subscription *sub)
 {
-       struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
+       u32 timeout = htohl(sub->evt.s.timeout, sub->swap);
 
-       tipc_nametbl_unsubscribe(sub);
-       list_del(&sub->subscrp_list);
-       kfree(sub);
-       atomic_dec(&tn->subscription_count);
+       if (timeout == TIPC_WAIT_FOREVER || del_timer(&sub->timer))
+               tipc_subscrp_put(sub);
 }
 
 static void tipc_subscrp_cancel(struct tipc_subscr *s,
                                struct tipc_subscriber *subscriber)
 {
-       struct tipc_subscription *sub, *temp;
-       u32 timeout;
-
-       spin_lock_bh(&subscriber->lock);
-       /* Find first matching subscription, exit if not found */
-       list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
-                                subscrp_list) {
-               if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) {
-                       timeout = htohl(sub->evt.s.timeout, sub->swap);
-                       if ((timeout == TIPC_WAIT_FOREVER) ||
-                           del_timer(&sub->timer)) {
-                               tipc_subscrp_delete(sub);
-                               tipc_subscrb_put(subscriber);
-                       }
-                       break;
-               }
-       }
-       spin_unlock_bh(&subscriber->lock);
+       tipc_subscrb_subscrp_delete(subscriber, s);
 }
 
 static struct tipc_subscription *tipc_subscrp_create(struct net *net,
@@ -272,6 +288,7 @@ static struct tipc_subscription *tipc_subscrp_create(struct net *net,
        sub->swap = swap;
        memcpy(&sub->evt.s, s, sizeof(*s));
        atomic_inc(&tn->subscription_count);
+       kref_init(&sub->kref);
        return sub;
 }
 
@@ -288,17 +305,16 @@ static void tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s,
 
        spin_lock_bh(&subscriber->lock);
        list_add(&sub->subscrp_list, &subscriber->subscrp_list);
-       tipc_subscrb_get(subscriber);
        sub->subscriber = subscriber;
        tipc_nametbl_subscribe(sub);
+       tipc_subscrb_get(subscriber);
        spin_unlock_bh(&subscriber->lock);
 
+       setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub);
        timeout = htohl(sub->evt.s.timeout, swap);
-       if (timeout == TIPC_WAIT_FOREVER)
-               return;
 
-       setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub);
-       mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
+       if (timeout != TIPC_WAIT_FOREVER)
+               mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
 }
 
 /* Handle one termination request for the subscriber */
index be60103082c923c0fd768f52c081af38eb42491b..ffdc214c117a924f34b416fde415fcd18201ebc0 100644 (file)
@@ -57,6 +57,7 @@ struct tipc_subscriber;
  * @evt: template for events generated by subscription
  */
 struct tipc_subscription {
+       struct kref kref;
        struct tipc_subscriber *subscriber;
        struct net *net;
        struct timer_list timer;
index 127656ebe7be47af8ebb8ea288340177fd068049..cef79873b09d2051663fedf37dc52874b7f7c415 100644 (file)
@@ -995,6 +995,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        unsigned int hash;
        struct unix_address *addr;
        struct hlist_head *list;
+       struct path path = { NULL, NULL };
 
        err = -EINVAL;
        if (sunaddr->sun_family != AF_UNIX)
@@ -1010,9 +1011,20 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                goto out;
        addr_len = err;
 
+       if (sun_path[0]) {
+               umode_t mode = S_IFSOCK |
+                      (SOCK_INODE(sock)->i_mode & ~current_umask());
+               err = unix_mknod(sun_path, mode, &path);
+               if (err) {
+                       if (err == -EEXIST)
+                               err = -EADDRINUSE;
+                       goto out;
+               }
+       }
+
        err = mutex_lock_interruptible(&u->bindlock);
        if (err)
-               goto out;
+               goto out_put;
 
        err = -EINVAL;
        if (u->addr)
@@ -1029,16 +1041,6 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        atomic_set(&addr->refcnt, 1);
 
        if (sun_path[0]) {
-               struct path path;
-               umode_t mode = S_IFSOCK |
-                      (SOCK_INODE(sock)->i_mode & ~current_umask());
-               err = unix_mknod(sun_path, mode, &path);
-               if (err) {
-                       if (err == -EEXIST)
-                               err = -EADDRINUSE;
-                       unix_release_addr(addr);
-                       goto out_up;
-               }
                addr->hash = UNIX_HASH_SIZE;
                hash = d_backing_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
                spin_lock(&unix_table_lock);
@@ -1065,6 +1067,9 @@ out_unlock:
        spin_unlock(&unix_table_lock);
 out_up:
        mutex_unlock(&u->bindlock);
+out_put:
+       if (err)
+               path_put(&path);
 out:
        return err;
 }
index 5c1b267e22beefe7cfa83e3541783fab702a092a..aee396b9f190bb4454844c7282fd4d3f5d85b5e7 100644 (file)
@@ -5916,6 +5916,7 @@ do {                                                                          \
                        break;
                }
                cfg->ht_opmode = ht_opmode;
+               mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1));
        }
        FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout,
                                  1, 65535, mask,
index 396e204888b34f342bd8f01afc4e0d1a3d52f823..b86ee54da2d14d6ba0de18481d2c55ed1a70a67b 100644 (file)
@@ -277,6 +277,11 @@ int load_bpf_file(char *path)
        Elf_Data *data, *data_prog, *symbols = NULL;
        char *shname, *shname_prog;
 
+       /* reset global variables */
+       kern_version = 0;
+       memset(license, 0, sizeof(license));
+       memset(processed_sec, 0, sizeof(processed_sec));
+
        if (elf_version(EV_CURRENT) == EV_NONE)
                return 1;
 
@@ -328,6 +333,8 @@ int load_bpf_file(char *path)
 
        /* load programs that need map fixup (relocations) */
        for (i = 1; i < ehdr.e_shnum; i++) {
+               if (processed_sec[i])
+                       continue;
 
                if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
                        continue;
index 92a44729dbe43ec05f5556939d37ab0a9fc2fa2f..7ef2a12b25b244b2700ab7feee961fb91b435970 100644 (file)
@@ -4,6 +4,7 @@
  * modify it under the terms of version 2 of the GNU General Public
  * License as published by the Free Software Foundation.
  */
+#define KBUILD_MODNAME "foo"
 #include <uapi/linux/bpf.h>
 #include <uapi/linux/if_ether.h>
 #include <uapi/linux/if_packet.h>
index 504058631ffccef319f9bc7f6a3349d6f854bdbd..4bfcaf93fcf308e8b94c44cfc46c186e74d56cb7 100644 (file)
@@ -104,7 +104,7 @@ static int attach_filter(int cg_fd, int type, int verdict)
                return EXIT_FAILURE;
        }
 
-       ret = bpf_prog_attach(prog_fd, cg_fd, type);
+       ret = bpf_prog_attach(prog_fd, cg_fd, type, 0);
        if (ret < 0) {
                printf("Failed to attach prog to cgroup: '%s'\n",
                       strerror(errno));
index 6e69be37f87f700dfd6a2c13a8d6310164b0cdd9..3049b1f26267c40836c79423362aeeb125b5761b 100644 (file)
@@ -79,11 +79,12 @@ int main(int argc, char **argv)
        if (join_cgroup(FOO))
                goto err;
 
-       if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS)) {
+       if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, 1)) {
                log_err("Attaching prog to /foo");
                goto err;
        }
 
+       printf("Attached DROP prog. This ping in cgroup /foo should fail...\n");
        assert(system(PING_CMD) != 0);
 
        /* Create cgroup /foo/bar, get fd, and join it */
@@ -94,24 +95,27 @@ int main(int argc, char **argv)
        if (join_cgroup(BAR))
                goto err;
 
+       printf("Attached DROP prog. This ping in cgroup /foo/bar should fail...\n");
        assert(system(PING_CMD) != 0);
 
-       if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS)) {
+       if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) {
                log_err("Attaching prog to /foo/bar");
                goto err;
        }
 
+       printf("Attached PASS prog. This ping in cgroup /foo/bar should pass...\n");
        assert(system(PING_CMD) == 0);
 
-
        if (bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS)) {
                log_err("Detaching program from /foo/bar");
                goto err;
        }
 
+       printf("Detached PASS from /foo/bar while DROP is attached to /foo.\n"
+              "This ping in cgroup /foo/bar should fail...\n");
        assert(system(PING_CMD) != 0);
 
-       if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS)) {
+       if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) {
                log_err("Attaching prog to /foo/bar");
                goto err;
        }
@@ -121,8 +125,60 @@ int main(int argc, char **argv)
                goto err;
        }
 
+       printf("Attached PASS from /foo/bar and detached DROP from /foo.\n"
+              "This ping in cgroup /foo/bar should pass...\n");
        assert(system(PING_CMD) == 0);
 
+       if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) {
+               log_err("Attaching prog to /foo/bar");
+               goto err;
+       }
+
+       if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0)) {
+               errno = 0;
+               log_err("Unexpected success attaching prog to /foo/bar");
+               goto err;
+       }
+
+       if (bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS)) {
+               log_err("Detaching program from /foo/bar");
+               goto err;
+       }
+
+       if (!bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS)) {
+               errno = 0;
+               log_err("Unexpected success in double detach from /foo");
+               goto err;
+       }
+
+       if (bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, 0)) {
+               log_err("Attaching non-overridable prog to /foo");
+               goto err;
+       }
+
+       if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0)) {
+               errno = 0;
+               log_err("Unexpected success attaching non-overridable prog to /foo/bar");
+               goto err;
+       }
+
+       if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) {
+               errno = 0;
+               log_err("Unexpected success attaching overridable prog to /foo/bar");
+               goto err;
+       }
+
+       if (!bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, 1)) {
+               errno = 0;
+               log_err("Unexpected success attaching overridable prog to /foo");
+               goto err;
+       }
+
+       if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, 0)) {
+               log_err("Attaching different non-overridable prog to /foo");
+               goto err;
+       }
+
        goto out;
 
 err:
@@ -132,5 +188,9 @@ out:
        close(foo);
        close(bar);
        cleanup_cgroup_environment();
+       if (!rc)
+               printf("PASS\n");
+       else
+               printf("FAIL\n");
        return rc;
 }
index 0791b949cbe418e49fa29ab1efb52007e29d67d8..c3cfb23e23b52832d795b2cc095f94b3f49d9f0b 100644 (file)
@@ -75,7 +75,7 @@ int main(int argc, char **argv)
                return EXIT_FAILURE;
        }
 
-       ret = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_SOCK_CREATE);
+       ret = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_SOCK_CREATE, 0);
        if (ret < 0) {
                printf("Failed to attach prog to cgroup: '%s'\n",
                       strerror(errno));
index 455ef0d06e932516e0591438c7b6e4d8ef831ac2..db036077b64480b6af3c9f42d6ac61b758384152 100644 (file)
@@ -55,7 +55,7 @@ int main(int argc, char **argv)
        }
 
        ret = bpf_prog_attach(prog_fd[filter_id], cg_fd,
-                             BPF_CGROUP_INET_SOCK_CREATE);
+                             BPF_CGROUP_INET_SOCK_CREATE, 0);
        if (ret < 0) {
                printf("Failed to attach prog to cgroup: '%s'\n",
                       strerror(errno));
index fd12d7154d420c72df35c5905893d54546ec70bf..7e4cf74553ff356c47aa6e8b2a642d92fc7bfb4d 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/version.h>
 #include <uapi/linux/bpf.h>
 #include <uapi/linux/seccomp.h>
+#include <uapi/linux/unistd.h>
 #include "bpf_helpers.h"
 
 #define PROG(F) SEC("kprobe/"__stringify(F)) int bpf_func_##F
index 85c38ecd3a2dcf5f65f99011a73be87d78c287ff..0f4f6e8c8611e3dea0e758215a65cc185ecdae02 100644 (file)
@@ -8,6 +8,7 @@
  * encapsulating the incoming packet in an IPv4/v6 header
  * and then XDP_TX it out.
  */
+#define KBUILD_MODNAME "foo"
 #include <uapi/linux/bpf.h>
 #include <linux/in.h>
 #include <linux/if_ether.h>
index eadcd4d359d91fc7823a75263c44c520e05f900b..d883116ebaa452d9c2f6c657de53121ebd9d50bd 100644 (file)
@@ -164,6 +164,7 @@ cmd_gensymtypes_c =                                                         \
     $(CPP) -D__GENKSYMS__ $(c_flags) $< |                                   \
     $(GENKSYMS) $(if $(1), -T $(2))                                         \
      $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX))             \
+     $(patsubst y,-R,$(CONFIG_MODULE_REL_CRCS))                             \
      $(if $(KBUILD_PRESERVE),-p)                                            \
      -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null))
 
@@ -337,6 +338,7 @@ cmd_gensymtypes_S =                                                         \
     $(CPP) -D__GENKSYMS__ $(c_flags) -xc - |                                \
     $(GENKSYMS) $(if $(1), -T $(2))                                         \
      $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX))             \
+     $(patsubst y,-R,$(CONFIG_MODULE_REL_CRCS))                             \
      $(if $(KBUILD_PRESERVE),-p)                                            \
      -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null))
 
index a0ba48fa2c5eaa6d1c883dde91e0223676b4d135..20cdb2bc1daec4fc1205c71c38c8d082f0602863 100755 (executable)
 #         https://01.org/suspendresume
 #       Source repo
 #         https://github.com/01org/suspendresume
-#       Documentation
-#         Getting Started
-#           https://01.org/suspendresume/documentation/getting-started
-#         Command List:
-#           https://01.org/suspendresume/documentation/command-list
 #
 # Description:
 #       This tool is designed to assist kernel and OS developers in optimizing
@@ -66,6 +61,8 @@ import platform
 from datetime import datetime
 import struct
 import ConfigParser
+from threading import Thread
+from subprocess import call, Popen, PIPE
 
 # ----------------- CLASSES --------------------
 
@@ -75,11 +72,15 @@ import ConfigParser
 #       store system values and test parameters
 class SystemValues:
        ansi = False
-       version = '4.2'
+       version = '4.5'
        verbose = False
        addlogs = False
-       mindevlen = 0.001
-       mincglen = 1.0
+       mindevlen = 0.0
+       mincglen = 0.0
+       cgphase = ''
+       cgtest = -1
+       callloopmaxgap = 0.0001
+       callloopmaxlen = 0.005
        srgap = 0
        cgexp = False
        outdir = ''
@@ -92,6 +93,7 @@ class SystemValues:
                'device_pm_callback_end',
                'device_pm_callback_start'
        ]
+       logmsg = ''
        testcommand = ''
        mempath = '/dev/mem'
        powerfile = '/sys/power/state'
@@ -117,19 +119,19 @@ class SystemValues:
        usetracemarkers = True
        usekprobes = True
        usedevsrc = False
+       useprocmon = False
        notestrun = False
+       mixedphaseheight = True
        devprops = dict()
-       postresumetime = 0
+       predelay = 0
+       postdelay = 0
+       procexecfmt = 'ps - (?P<ps>.*)$'
        devpropfmt = '# Device Properties: .*'
        tracertypefmt = '# tracer: (?P<t>.*)'
        firmwarefmt = '# fwsuspend (?P<s>[0-9]*) fwresume (?P<r>[0-9]*)$'
-       postresumefmt = '# post resume time (?P<t>[0-9]*)$'
        stampfmt = '# suspend-(?P<m>[0-9]{2})(?P<d>[0-9]{2})(?P<y>[0-9]{2})-'+\
                                '(?P<H>[0-9]{2})(?P<M>[0-9]{2})(?P<S>[0-9]{2})'+\
                                ' (?P<host>.*) (?P<mode>.*) (?P<kernel>.*)$'
-       kprobecolor = 'rgba(204,204,204,0.5)'
-       synccolor = 'rgba(204,204,204,0.5)'
-       debugfuncs = []
        tracefuncs = {
                'sys_sync': dict(),
                'pm_prepare_console': dict(),
@@ -152,44 +154,66 @@ class SystemValues:
                'CPU_OFF': {
                        'func':'_cpu_down',
                        'args_x86_64': {'cpu':'%di:s32'},
-                       'format': 'CPU_OFF[{cpu}]',
-                       'mask': 'CPU_.*_DOWN'
+                       'format': 'CPU_OFF[{cpu}]'
                },
                'CPU_ON': {
                        'func':'_cpu_up',
                        'args_x86_64': {'cpu':'%di:s32'},
-                       'format': 'CPU_ON[{cpu}]',
-                       'mask': 'CPU_.*_UP'
+                       'format': 'CPU_ON[{cpu}]'
                },
        }
        dev_tracefuncs = {
                # general wait/delay/sleep
-               'msleep': { 'args_x86_64': {'time':'%di:s32'} },
-               'udelay': { 'func':'__const_udelay', 'args_x86_64': {'loops':'%di:s32'} },
-               'acpi_os_stall': dict(),
+               'msleep': { 'args_x86_64': {'time':'%di:s32'}, 'ub': 1 },
+               'schedule_timeout_uninterruptible': { 'args_x86_64': {'timeout':'%di:s32'}, 'ub': 1 },
+               'schedule_timeout': { 'args_x86_64': {'timeout':'%di:s32'}, 'ub': 1 },
+               'udelay': { 'func':'__const_udelay', 'args_x86_64': {'loops':'%di:s32'}, 'ub': 1 },
+               'usleep_range': { 'args_x86_64': {'min':'%di:s32', 'max':'%si:s32'}, 'ub': 1 },
+               'mutex_lock_slowpath': { 'func':'__mutex_lock_slowpath', 'ub': 1 },
+               'acpi_os_stall': {'ub': 1},
                # ACPI
                'acpi_resume_power_resources': dict(),
                'acpi_ps_parse_aml': dict(),
                # filesystem
                'ext4_sync_fs': dict(),
+               # 80211
+               'iwlagn_mac_start': dict(),
+               'iwlagn_alloc_bcast_station': dict(),
+               'iwl_trans_pcie_start_hw': dict(),
+               'iwl_trans_pcie_start_fw': dict(),
+               'iwl_run_init_ucode': dict(),
+               'iwl_load_ucode_wait_alive': dict(),
+               'iwl_alive_start': dict(),
+               'iwlagn_mac_stop': dict(),
+               'iwlagn_mac_suspend': dict(),
+               'iwlagn_mac_resume': dict(),
+               'iwlagn_mac_add_interface': dict(),
+               'iwlagn_mac_remove_interface': dict(),
+               'iwlagn_mac_change_interface': dict(),
+               'iwlagn_mac_config': dict(),
+               'iwlagn_configure_filter': dict(),
+               'iwlagn_mac_hw_scan': dict(),
+               'iwlagn_bss_info_changed': dict(),
+               'iwlagn_mac_channel_switch': dict(),
+               'iwlagn_mac_flush': dict(),
                # ATA
                'ata_eh_recover': { 'args_x86_64': {'port':'+36(%di):s32'} },
                # i915
-               'i915_gem_restore_gtt_mappings': dict(),
+               'i915_gem_resume': dict(),
+               'i915_restore_state': dict(),
                'intel_opregion_setup': dict(),
+               'g4x_pre_enable_dp': dict(),
+               'vlv_pre_enable_dp': dict(),
+               'chv_pre_enable_dp': dict(),
+               'g4x_enable_dp': dict(),
+               'vlv_enable_dp': dict(),
+               'intel_hpd_init': dict(),
+               'intel_opregion_register': dict(),
                'intel_dp_detect': dict(),
                'intel_hdmi_detect': dict(),
                'intel_opregion_init': dict(),
+               'intel_fbdev_set_suspend': dict(),
        }
-       kprobes_postresume = [
-               {
-                       'name': 'ataportrst',
-                       'func': 'ata_eh_recover',
-                       'args': {'port':'+36(%di):s32'},
-                       'format': 'ata{port}_port_reset',
-                       'mask': 'ata.*_port_reset'
-               }
-       ]
        kprobes = dict()
        timeformat = '%.3f'
        def __init__(self):
@@ -198,6 +222,7 @@ class SystemValues:
                        self.embedded = True
                        self.addlogs = True
                        self.htmlfile = os.environ['LOG_FILE']
+               self.archargs = 'args_'+platform.machine()
                self.hostname = platform.node()
                if(self.hostname == ''):
                        self.hostname = 'localhost'
@@ -214,6 +239,13 @@ class SystemValues:
                if num < 0 or num > 6:
                        return
                self.timeformat = '%.{0}f'.format(num)
+       def setOutputFolder(self, value):
+               args = dict()
+               n = datetime.now()
+               args['date'] = n.strftime('%y%m%d')
+               args['time'] = n.strftime('%H%M%S')
+               args['hostname'] = self.hostname
+               self.outdir = value.format(**args)
        def setOutputFile(self):
                if((self.htmlfile == '') and (self.dmesgfile != '')):
                        m = re.match('(?P<name>.*)_dmesg\.txt$', self.dmesgfile)
@@ -253,10 +285,14 @@ class SystemValues:
                        self.testdir+'/'+self.prefix+'_'+self.suspendmode+'.html'
                if not os.path.isdir(self.testdir):
                        os.mkdir(self.testdir)
-       def setDeviceFilter(self, devnames):
-               self.devicefilter = string.split(devnames)
+       def setDeviceFilter(self, value):
+               self.devicefilter = []
+               if value:
+                       value = value.split(',')
+               for i in value:
+                       self.devicefilter.append(i.strip())
        def rtcWakeAlarmOn(self):
-               os.system('echo 0 > '+self.rtcpath+'/wakealarm')
+               call('echo 0 > '+self.rtcpath+'/wakealarm', shell=True)
                outD = open(self.rtcpath+'/date', 'r').read().strip()
                outT = open(self.rtcpath+'/time', 'r').read().strip()
                mD = re.match('^(?P<y>[0-9]*)-(?P<m>[0-9]*)-(?P<d>[0-9]*)', outD)
@@ -272,12 +308,12 @@ class SystemValues:
                        # if hardware time fails, use the software time
                        nowtime = int(datetime.now().strftime('%s'))
                alarm = nowtime + self.rtcwaketime
-               os.system('echo %d > %s/wakealarm' % (alarm, self.rtcpath))
+               call('echo %d > %s/wakealarm' % (alarm, self.rtcpath), shell=True)
        def rtcWakeAlarmOff(self):
-               os.system('echo 0 > %s/wakealarm' % self.rtcpath)
+               call('echo 0 > %s/wakealarm' % self.rtcpath, shell=True)
        def initdmesg(self):
                # get the latest time stamp from the dmesg log
-               fp = os.popen('dmesg')
+               fp = Popen('dmesg', stdout=PIPE).stdout
                ktime = '0'
                for line in fp:
                        line = line.replace('\r\n', '')
@@ -291,7 +327,7 @@ class SystemValues:
                self.dmesgstart = float(ktime)
        def getdmesg(self):
                # store all new dmesg lines since initdmesg was called
-               fp = os.popen('dmesg')
+               fp = Popen('dmesg', stdout=PIPE).stdout
                op = open(self.dmesgfile, 'a')
                for line in fp:
                        line = line.replace('\r\n', '')
@@ -317,25 +353,18 @@ class SystemValues:
        def getFtraceFilterFunctions(self, current):
                rootCheck(True)
                if not current:
-                       os.system('cat '+self.tpath+'available_filter_functions')
+                       call('cat '+self.tpath+'available_filter_functions', shell=True)
                        return
                fp = open(self.tpath+'available_filter_functions')
                master = fp.read().split('\n')
                fp.close()
-               if len(self.debugfuncs) > 0:
-                       for i in self.debugfuncs:
-                               if i in master:
-                                       print i
-                               else:
-                                       print self.colorText(i)
-               else:
-                       for i in self.tracefuncs:
-                               if 'func' in self.tracefuncs[i]:
-                                       i = self.tracefuncs[i]['func']
-                               if i in master:
-                                       print i
-                               else:
-                                       print self.colorText(i)
+               for i in self.tracefuncs:
+                       if 'func' in self.tracefuncs[i]:
+                               i = self.tracefuncs[i]['func']
+                       if i in master:
+                               print i
+                       else:
+                               print self.colorText(i)
        def setFtraceFilterFunctions(self, list):
                fp = open(self.tpath+'available_filter_functions')
                master = fp.read().split('\n')
@@ -351,22 +380,15 @@ class SystemValues:
                fp = open(self.tpath+'set_graph_function', 'w')
                fp.write(flist)
                fp.close()
-       def kprobeMatch(self, name, target):
-               if name not in self.kprobes:
-                       return False
-               if re.match(self.kprobes[name]['mask'], target):
-                       return True
-               return False
        def basicKprobe(self, name):
-               self.kprobes[name] = {'name': name,'func': name,'args': dict(),'format': name,'mask': name}
+               self.kprobes[name] = {'name': name,'func': name,'args': dict(),'format': name}
        def defaultKprobe(self, name, kdata):
                k = kdata
-               for field in ['name', 'format', 'mask', 'func']:
+               for field in ['name', 'format', 'func']:
                        if field not in k:
                                k[field] = name
-               archargs = 'args_'+platform.machine()
-               if archargs in k:
-                       k['args'] = k[archargs]
+               if self.archargs in k:
+                       k['args'] = k[self.archargs]
                else:
                        k['args'] = dict()
                        k['format'] = name
@@ -403,49 +425,80 @@ class SystemValues:
                out = fmt.format(**arglist)
                out = out.replace(' ', '_').replace('"', '')
                return out
-       def kprobeText(self, kprobe):
-               name, fmt, func, args = kprobe['name'], kprobe['format'], kprobe['func'], kprobe['args']
+       def kprobeText(self, kname, kprobe):
+               name = fmt = func = kname
+               args = dict()
+               if 'name' in kprobe:
+                       name = kprobe['name']
+               if 'format' in kprobe:
+                       fmt = kprobe['format']
+               if 'func' in kprobe:
+                       func = kprobe['func']
+               if self.archargs in kprobe:
+                       args = kprobe[self.archargs]
+               if 'args' in kprobe:
+                       args = kprobe['args']
                if re.findall('{(?P<n>[a-z,A-Z,0-9]*)}', func):
-                       doError('Kprobe "%s" has format info in the function name "%s"' % (name, func), False)
+                       doError('Kprobe "%s" has format info in the function name "%s"' % (name, func))
                for arg in re.findall('{(?P<n>[a-z,A-Z,0-9]*)}', fmt):
                        if arg not in args:
-                               doError('Kprobe "%s" is missing argument "%s"' % (name, arg), False)
+                               doError('Kprobe "%s" is missing argument "%s"' % (name, arg))
                val = 'p:%s_cal %s' % (name, func)
                for i in sorted(args):
                        val += ' %s=%s' % (i, args[i])
                val += '\nr:%s_ret %s $retval\n' % (name, func)
                return val
-       def addKprobes(self):
+       def addKprobes(self, output=False):
+               if len(sysvals.kprobes) < 1:
+                       return
+               if output:
+                       print('    kprobe functions in this kernel:')
                # first test each kprobe
-               print('INITIALIZING KPROBES...')
                rejects = []
+               # sort kprobes: trace, ub-dev, custom, dev
+               kpl = [[], [], [], []]
                for name in sorted(self.kprobes):
-                       if not self.testKprobe(self.kprobes[name]):
+                       res = self.colorText('YES', 32)
+                       if not self.testKprobe(name, self.kprobes[name]):
+                               res = self.colorText('NO')
                                rejects.append(name)
+                       else:
+                               if name in self.tracefuncs:
+                                       kpl[0].append(name)
+                               elif name in self.dev_tracefuncs:
+                                       if 'ub' in self.dev_tracefuncs[name]:
+                                               kpl[1].append(name)
+                                       else:
+                                               kpl[3].append(name)
+                               else:
+                                       kpl[2].append(name)
+                       if output:
+                               print('         %s: %s' % (name, res))
+               kplist = kpl[0] + kpl[1] + kpl[2] + kpl[3]
                # remove all failed ones from the list
                for name in rejects:
-                       vprint('Skipping KPROBE: %s' % name)
                        self.kprobes.pop(name)
+               # set the kprobes all at once
                self.fsetVal('', 'kprobe_events')
                kprobeevents = ''
-               # set the kprobes all at once
-               for kp in self.kprobes:
-                       val = self.kprobeText(self.kprobes[kp])
-                       vprint('Adding KPROBE: %s\n%s' % (kp, val.strip()))
-                       kprobeevents += self.kprobeText(self.kprobes[kp])
+               for kp in kplist:
+                       kprobeevents += self.kprobeText(kp, self.kprobes[kp])
                self.fsetVal(kprobeevents, 'kprobe_events')
                # verify that the kprobes were set as ordered
                check = self.fgetVal('kprobe_events')
-               linesout = len(kprobeevents.split('\n'))
-               linesack = len(check.split('\n'))
-               if linesack < linesout:
-                       # if not, try appending the kprobes 1 by 1
-                       for kp in self.kprobes:
-                               kprobeevents = self.kprobeText(self.kprobes[kp])
-                               self.fsetVal(kprobeevents, 'kprobe_events', 'a')
+               linesout = len(kprobeevents.split('\n')) - 1
+               linesack = len(check.split('\n')) - 1
+               if output:
+                       res = '%d/%d' % (linesack, linesout)
+                       if linesack < linesout:
+                               res = self.colorText(res, 31)
+                       else:
+                               res = self.colorText(res, 32)
+                       print('    working kprobe functions enabled: %s' % res)
                self.fsetVal('1', 'events/kprobes/enable')
-       def testKprobe(self, kprobe):
-               kprobeevents = self.kprobeText(kprobe)
+       def testKprobe(self, kname, kprobe):
+               self.fsetVal('0', 'events/kprobes/enable')
+               kprobeevents = self.kprobeText(kname, kprobe)
                if not kprobeevents:
                        return False
                try:
@@ -463,8 +516,9 @@ class SystemValues:
                if not os.path.exists(file):
                        return False
                try:
-                       fp = open(file, mode)
+                       fp = open(file, mode, 0)
                        fp.write(val)
+                       fp.flush()
                        fp.close()
                except:
                        pass
@@ -491,21 +545,17 @@ class SystemValues:
                for name in self.dev_tracefuncs:
                        self.defaultKprobe(name, self.dev_tracefuncs[name])
        def isCallgraphFunc(self, name):
-               if len(self.debugfuncs) < 1 and self.suspendmode == 'command':
-                       return True
-               if name in self.debugfuncs:
+               if len(self.tracefuncs) < 1 and self.suspendmode == 'command':
                        return True
-               funclist = []
                for i in self.tracefuncs:
                        if 'func' in self.tracefuncs[i]:
-                               funclist.append(self.tracefuncs[i]['func'])
+                               f = self.tracefuncs[i]['func']
                        else:
-                               funclist.append(i)
-               if name in funclist:
-                       return True
+                               f = i
+                       if name == f:
+                               return True
                return False
        def initFtrace(self, testing=False):
-               tp = self.tpath
                print('INITIALIZING FTRACE...')
                # turn trace off
                self.fsetVal('0', 'tracing_on')
@@ -518,18 +568,7 @@ class SystemValues:
                # go no further if this is just a status check
                if testing:
                        return
-               if self.usekprobes:
-                       # add tracefunc kprobes so long as were not using full callgraph
-                       if(not self.usecallgraph or len(self.debugfuncs) > 0):
-                               for name in self.tracefuncs:
-                                       self.defaultKprobe(name, self.tracefuncs[name])
-                               if self.usedevsrc:
-                                       for name in self.dev_tracefuncs:
-                                               self.defaultKprobe(name, self.dev_tracefuncs[name])
-                       else:
-                               self.usedevsrc = False
-                       self.addKprobes()
-               # initialize the callgraph trace, unless this is an x2 run
+               # initialize the callgraph trace
                if(self.usecallgraph):
                        # set trace type
                        self.fsetVal('function_graph', 'current_tracer')
@@ -545,20 +584,24 @@ class SystemValues:
                        self.fsetVal('context-info', 'trace_options')
                        self.fsetVal('graph-time', 'trace_options')
                        self.fsetVal('0', 'max_graph_depth')
-                       if len(self.debugfuncs) > 0:
-                               self.setFtraceFilterFunctions(self.debugfuncs)
-                       elif self.suspendmode == 'command':
-                               self.fsetVal('', 'set_graph_function')
-                       else:
-                               cf = ['dpm_run_callback']
-                               if(self.usetraceeventsonly):
-                                       cf += ['dpm_prepare', 'dpm_complete']
-                               for fn in self.tracefuncs:
-                                       if 'func' in self.tracefuncs[fn]:
-                                               cf.append(self.tracefuncs[fn]['func'])
-                                       else:
-                                               cf.append(fn)
-                               self.setFtraceFilterFunctions(cf)
+                       cf = ['dpm_run_callback']
+                       if(self.usetraceeventsonly):
+                               cf += ['dpm_prepare', 'dpm_complete']
+                       for fn in self.tracefuncs:
+                               if 'func' in self.tracefuncs[fn]:
+                                       cf.append(self.tracefuncs[fn]['func'])
+                               else:
+                                       cf.append(fn)
+                       self.setFtraceFilterFunctions(cf)
+               # initialize the kprobe trace
+               elif self.usekprobes:
+                       for name in self.tracefuncs:
+                               self.defaultKprobe(name, self.tracefuncs[name])
+                       if self.usedevsrc:
+                               for name in self.dev_tracefuncs:
+                                       self.defaultKprobe(name, self.dev_tracefuncs[name])
+                       print('INITIALIZING KPROBES...')
+                       self.addKprobes(self.verbose)
                if(self.usetraceevents):
                        # turn trace events on
                        events = iter(self.traceevents)
@@ -590,10 +633,10 @@ class SystemValues:
                        if(os.path.exists(tp+f) == False):
                                return False
                return True
-       def colorText(self, str):
+       def colorText(self, str, color=31):
                if not self.ansi:
                        return str
-               return '\x1B[31;40m'+str+'\x1B[m'
+               return '\x1B[%d;40m%s\x1B[m' % (color, str)
 
 sysvals = SystemValues()
 
@@ -625,8 +668,8 @@ class DevProps:
                if self.xtraclass:
                        return ' '+self.xtraclass
                if self.async:
-                       return ' async'
-               return ' sync'
+                       return ' async_device'
+               return ' sync_device'
 
 # Class: DeviceNode
 # Description:
@@ -646,8 +689,6 @@ class DeviceNode:
 #       The primary container for suspend/resume test data. There is one for
 #       each test run. The data is organized into a cronological hierarchy:
 #       Data.dmesg {
-#              root structure, started as dmesg & ftrace, but now only ftrace
-#              contents: times for suspend start/end, resume start/end, fwdata
 #              phases {
 #                      10 sequential, non-overlapping phases of S/R
 #                      contents: times for phase start/end, order/color data for html
@@ -658,7 +699,7 @@ class DeviceNode:
 #                                      contents: start/stop times, pid/cpu/driver info
 #                                              parents/children, html id for timeline/callgraph
 #                                              optionally includes an ftrace callgraph
-#                                              optionally includes intradev trace events
+#                                              optionally includes dev/ps data
 #                              }
 #                      }
 #              }
@@ -671,19 +712,24 @@ class Data:
        end = 0.0   # test end
        tSuspended = 0.0 # low-level suspend start
        tResumed = 0.0   # low-level resume start
+       tKernSus = 0.0   # kernel level suspend start
+       tKernRes = 0.0   # kernel level resume end
        tLow = 0.0       # time spent in low-level suspend (standby/freeze)
        fwValid = False  # is firmware data available
        fwSuspend = 0    # time spent in firmware suspend
        fwResume = 0     # time spent in firmware resume
        dmesgtext = []   # dmesg text file in memory
+       pstl = 0         # process timeline
        testnumber = 0
        idstr = ''
        html_device_id = 0
        stamp = 0
        outfile = ''
-       dev_ubiquitous = ['msleep', 'udelay']
+       devpids = []
+       kerror = False
        def __init__(self, num):
-               idchar = 'abcdefghijklmnopqrstuvwxyz'
+               idchar = 'abcdefghij'
+               self.pstl = dict()
                self.testnumber = num
                self.idstr = idchar[num]
                self.dmesgtext = []
@@ -714,16 +760,39 @@ class Data:
                self.devicegroups = []
                for phase in self.phases:
                        self.devicegroups.append([phase])
-       def getStart(self):
-               return self.dmesg[self.phases[0]]['start']
+               self.errorinfo = {'suspend':[],'resume':[]}
+       def extractErrorInfo(self, dmesg):
+               error = ''
+               tm = 0.0
+               for i in range(len(dmesg)):
+                       if 'Call Trace:' in dmesg[i]:
+                               m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) .*', dmesg[i])
+                               if not m:
+                                       continue
+                               tm = float(m.group('ktime'))
+                               if tm < self.start or tm > self.end:
+                                       continue
+                               for j in range(i-10, i+1):
+                                       error += dmesg[j]
+                               continue
+                       if error:
+                               m = re.match('[ \t]*\[ *[0-9\.]*\]  \[\<[0-9a-fA-F]*\>\] .*', dmesg[i])
+                               if m:
+                                       error += dmesg[i]
+                               else:
+                                       if tm < self.tSuspended:
+                                               dir = 'suspend'
+                                       else:
+                                               dir = 'resume'
+                                       error = error.replace('<', '&lt').replace('>', '&gt')
+                                       vprint('kernel error found in %s at %f' % (dir, tm))
+                                       self.errorinfo[dir].append((tm, error))
+                                       self.kerror = True
+                                       error = ''
        def setStart(self, time):
                self.start = time
-               self.dmesg[self.phases[0]]['start'] = time
-       def getEnd(self):
-               return self.dmesg[self.phases[-1]]['end']
        def setEnd(self, time):
                self.end = time
-               self.dmesg[self.phases[-1]]['end'] = time
        def isTraceEventOutsideDeviceCalls(self, pid, time):
                for phase in self.phases:
                        list = self.dmesg[phase]['list']
@@ -733,39 +802,67 @@ class Data:
                                        time < d['end']):
                                        return False
                return True
-       def targetDevice(self, phaselist, start, end, pid=-1):
+       def sourcePhase(self, start):
+               for phase in self.phases:
+                       pend = self.dmesg[phase]['end']
+                       if start <= pend:
+                               return phase
+               return 'resume_complete'
+       def sourceDevice(self, phaselist, start, end, pid, type):
                tgtdev = ''
                for phase in phaselist:
                        list = self.dmesg[phase]['list']
                        for devname in list:
                                dev = list[devname]
-                               if(pid >= 0 and dev['pid'] != pid):
+                               # pid must match
+                               if dev['pid'] != pid:
                                        continue
                                devS = dev['start']
                                devE = dev['end']
-                               if(start < devS or start >= devE or end <= devS or end > devE):
-                                       continue
+                               if type == 'device':
+                                       # device target event is entirely inside the source boundary
+                                       if(start < devS or start >= devE or end <= devS or end > devE):
+                                               continue
+                               elif type == 'thread':
+                                       # thread target event will expand the source boundary
+                                       if start < devS:
+                                               dev['start'] = start
+                                       if end > devE:
+                                               dev['end'] = end
                                tgtdev = dev
                                break
                return tgtdev
        def addDeviceFunctionCall(self, displayname, kprobename, proc, pid, start, end, cdata, rdata):
-               machstart = self.dmesg['suspend_machine']['start']
-               machend = self.dmesg['resume_machine']['end']
-               tgtdev = self.targetDevice(self.phases, start, end, pid)
-               if not tgtdev and start >= machstart and end < machend:
-                       # device calls in machine phases should be serial
-                       tgtdev = self.targetDevice(['suspend_machine', 'resume_machine'], start, end)
+               # try to place the call in a device
+               tgtdev = self.sourceDevice(self.phases, start, end, pid, 'device')
+               # calls with device pids that occur outside device bounds are dropped
+               # TODO: include these somehow
+               if not tgtdev and pid in self.devpids:
+                       return False
+               # try to place the call in a thread
                if not tgtdev:
-                       if 'scsi_eh' in proc:
-                               self.newActionGlobal(proc, start, end, pid)
-                               self.addDeviceFunctionCall(displayname, kprobename, proc, pid, start, end, cdata, rdata)
+                       tgtdev = self.sourceDevice(self.phases, start, end, pid, 'thread')
+               # create new thread blocks, expand as new calls are found
+               if not tgtdev:
+                       if proc == '<...>':
+                               threadname = 'kthread-%d' % (pid)
                        else:
-                               vprint('IGNORE: %s[%s](%d) [%f - %f] | %s | %s | %s' % (displayname, kprobename,
-                                       pid, start, end, cdata, rdata, proc))
+                               threadname = '%s-%d' % (proc, pid)
+                       tgtphase = self.sourcePhase(start)
+                       self.newAction(tgtphase, threadname, pid, '', start, end, '', ' kth', '')
+                       return self.addDeviceFunctionCall(displayname, kprobename, proc, pid, start, end, cdata, rdata)
+               # this should not happen
+               if not tgtdev:
+                       vprint('[%f - %f] %s-%d %s %s %s' % \
+                               (start, end, proc, pid, kprobename, cdata, rdata))
                        return False
-               # detail block fits within tgtdev
+               # place the call data inside the src element of the tgtdev
                if('src' not in tgtdev):
                        tgtdev['src'] = []
+               dtf = sysvals.dev_tracefuncs
+               ubiquitous = False
+               if kprobename in dtf and 'ub' in dtf[kprobename]:
+                       ubiquitous = True
                title = cdata+' '+rdata
                mstr = '\(.*\) *(?P<args>.*) *\((?P<caller>.*)\+.* arg1=(?P<ret>.*)'
                m = re.match(mstr, title)
@@ -777,14 +874,81 @@ class Data:
                                r = ''
                        else:
                                r = 'ret=%s ' % r
-                       l = '%0.3fms' % ((end - start) * 1000)
-                       if kprobename in self.dev_ubiquitous:
-                               title = '%s(%s) <- %s, %s(%s)' % (displayname, a, c, r, l)
-                       else:
-                               title = '%s(%s) %s(%s)' % (displayname, a, r, l)
-               e = TraceEvent(title, kprobename, start, end - start)
+                       if ubiquitous and c in dtf and 'ub' in dtf[c]:
+                               return False
+               color = sysvals.kprobeColor(kprobename)
+               e = DevFunction(displayname, a, c, r, start, end, ubiquitous, proc, pid, color)
                tgtdev['src'].append(e)
                return True
+       def overflowDevices(self):
+               # get a list of devices that extend beyond the end of this test run
+               devlist = []
+               for phase in self.phases:
+                       list = self.dmesg[phase]['list']
+                       for devname in list:
+                               dev = list[devname]
+                               if dev['end'] > self.end:
+                                       devlist.append(dev)
+               return devlist
+       def mergeOverlapDevices(self, devlist):
+               # merge any devices that overlap devlist
+               for dev in devlist:
+                       devname = dev['name']
+                       for phase in self.phases:
+                               list = self.dmesg[phase]['list']
+                               if devname not in list:
+                                       continue
+                               tdev = list[devname]
+                               o = min(dev['end'], tdev['end']) - max(dev['start'], tdev['start'])
+                               if o <= 0:
+                                       continue
+                               dev['end'] = tdev['end']
+                               if 'src' not in dev or 'src' not in tdev:
+                                       continue
+                               dev['src'] += tdev['src']
+                               del list[devname]
+       def usurpTouchingThread(self, name, dev):
+               # the caller test has priority of this thread, give it to him
+               for phase in self.phases:
+                       list = self.dmesg[phase]['list']
+                       if name in list:
+                               tdev = list[name]
+                               if tdev['start'] - dev['end'] < 0.1:
+                                       dev['end'] = tdev['end']
+                                       if 'src' not in dev:
+                                               dev['src'] = []
+                                       if 'src' in tdev:
+                                               dev['src'] += tdev['src']
+                                       del list[name]
+                               break
+       def stitchTouchingThreads(self, testlist):
+               # merge any threads between tests that touch
+               for phase in self.phases:
+                       list = self.dmesg[phase]['list']
+                       for devname in list:
+                               dev = list[devname]
+                               if 'htmlclass' not in dev or 'kth' not in dev['htmlclass']:
+                                       continue
+                               for data in testlist:
+                                       data.usurpTouchingThread(devname, dev)
+       def optimizeDevSrc(self):
+               # merge any src call loops to reduce timeline size
+               for phase in self.phases:
+                       list = self.dmesg[phase]['list']
+                       for dev in list:
+                               if 'src' not in list[dev]:
+                                       continue
+                               src = list[dev]['src']
+                               p = 0
+                               for e in sorted(src, key=lambda event: event.time):
+                                       if not p or not e.repeat(p):
+                                               p = e
+                                               continue
+                                       # e is another iteration of p, move it into p
+                                       p.end = e.end
+                                       p.length = p.end - p.time
+                                       p.count += 1
+                                       src.remove(e)
        def trimTimeVal(self, t, t0, dT, left):
                if left:
                        if(t > t0):
@@ -804,6 +968,8 @@ class Data:
                self.tSuspended = self.trimTimeVal(self.tSuspended, t0, dT, left)
                self.tResumed = self.trimTimeVal(self.tResumed, t0, dT, left)
                self.start = self.trimTimeVal(self.start, t0, dT, left)
+               self.tKernSus = self.trimTimeVal(self.tKernSus, t0, dT, left)
+               self.tKernRes = self.trimTimeVal(self.tKernRes, t0, dT, left)
                self.end = self.trimTimeVal(self.end, t0, dT, left)
                for phase in self.phases:
                        p = self.dmesg[phase]
@@ -832,36 +998,6 @@ class Data:
                        else:
                                self.trimTime(self.tSuspended, \
                                        self.tResumed-self.tSuspended, False)
-       def newPhaseWithSingleAction(self, phasename, devname, start, end, color):
-               for phase in self.phases:
-                       self.dmesg[phase]['order'] += 1
-               self.html_device_id += 1
-               devid = '%s%d' % (self.idstr, self.html_device_id)
-               list = dict()
-               list[devname] = \
-                       {'start': start, 'end': end, 'pid': 0, 'par': '',
-                       'length': (end-start), 'row': 0, 'id': devid, 'drv': '' };
-               self.dmesg[phasename] = \
-                       {'list': list, 'start': start, 'end': end,
-                       'row': 0, 'color': color, 'order': 0}
-               self.phases = self.sortedPhases()
-       def newPhase(self, phasename, start, end, color, order):
-               if(order < 0):
-                       order = len(self.phases)
-               for phase in self.phases[order:]:
-                       self.dmesg[phase]['order'] += 1
-               if(order > 0):
-                       p = self.phases[order-1]
-                       self.dmesg[p]['end'] = start
-               if(order < len(self.phases)):
-                       p = self.phases[order]
-                       self.dmesg[p]['start'] = end
-               list = dict()
-               self.dmesg[phasename] = \
-                       {'list': list, 'start': start, 'end': end,
-                       'row': 0, 'color': color, 'order': order}
-               self.phases = self.sortedPhases()
-               self.devicegroups.append([phasename])
        def setPhase(self, phase, ktime, isbegin):
                if(isbegin):
                        self.dmesg[phase]['start'] = ktime
@@ -881,7 +1017,7 @@ class Data:
                for t in sorted(tmp):
                        slist.append(tmp[t])
                return slist
-       def fixupInitcalls(self, phase, end):
+       def fixupInitcalls(self, phase):
                # if any calls never returned, clip them at system resume end
                phaselist = self.dmesg[phase]['list']
                for devname in phaselist:
@@ -893,37 +1029,23 @@ class Data:
                                                break
                                vprint('%s (%s): callback didnt return' % (devname, phase))
        def deviceFilter(self, devicefilter):
-               # remove all by the relatives of the filter devnames
-               filter = []
-               for phase in self.phases:
-                       list = self.dmesg[phase]['list']
-                       for name in devicefilter:
-                               dev = name
-                               while(dev in list):
-                                       if(dev not in filter):
-                                               filter.append(dev)
-                                       dev = list[dev]['par']
-                               children = self.deviceDescendants(name, phase)
-                               for dev in children:
-                                       if(dev not in filter):
-                                               filter.append(dev)
                for phase in self.phases:
                        list = self.dmesg[phase]['list']
                        rmlist = []
                        for name in list:
-                               pid = list[name]['pid']
-                               if(name not in filter and pid >= 0):
+                               keep = False
+                               for filter in devicefilter:
+                                       if filter in name or \
+                                               ('drv' in list[name] and filter in list[name]['drv']):
+                                               keep = True
+                               if not keep:
                                        rmlist.append(name)
                        for name in rmlist:
                                del list[name]
        def fixupInitcallsThatDidntReturn(self):
                # if any calls never returned, clip them at system resume end
                for phase in self.phases:
-                       self.fixupInitcalls(phase, self.getEnd())
-       def isInsideTimeline(self, start, end):
-               if(self.start <= start and self.end > start):
-                       return True
-               return False
+                       self.fixupInitcalls(phase)
        def phaseOverlap(self, phases):
                rmgroups = []
                newgroup = []
@@ -940,30 +1062,35 @@ class Data:
                        self.devicegroups.remove(group)
                self.devicegroups.append(newgroup)
        def newActionGlobal(self, name, start, end, pid=-1, color=''):
-               # if event starts before timeline start, expand timeline
-               if(start < self.start):
-                       self.setStart(start)
-               # if event ends after timeline end, expand the timeline
-               if(end > self.end):
-                       self.setEnd(end)
-               # which phase is this device callback or action "in"
-               targetphase = "none"
+               # which phase is this device callback or action in
+               targetphase = 'none'
                htmlclass = ''
                overlap = 0.0
                phases = []
                for phase in self.phases:
                        pstart = self.dmesg[phase]['start']
                        pend = self.dmesg[phase]['end']
+                       # see if the action overlaps this phase
                        o = max(0, min(end, pend) - max(start, pstart))
                        if o > 0:
                                phases.append(phase)
+                       # set the target phase to the one that overlaps most
                        if o > overlap:
                                if overlap > 0 and phase == 'post_resume':
                                        continue
                                targetphase = phase
                                overlap = o
+               # if no target phase was found, pin it to the edge
+               if targetphase == 'none':
+                       p0start = self.dmesg[self.phases[0]]['start']
+                       if start <= p0start:
+                               targetphase = self.phases[0]
+                       else:
+                               targetphase = self.phases[-1]
                if pid == -2:
                        htmlclass = ' bg'
+               elif pid == -3:
+                       htmlclass = ' ps'
                if len(phases) > 1:
                        htmlclass = ' bg'
                        self.phaseOverlap(phases)
@@ -985,29 +1112,13 @@ class Data:
                        while(name in list):
                                name = '%s[%d]' % (origname, i)
                                i += 1
-               list[name] = {'start': start, 'end': end, 'pid': pid, 'par': parent,
-                                         'length': length, 'row': 0, 'id': devid, 'drv': drv }
+               list[name] = {'name': name, 'start': start, 'end': end, 'pid': pid,
+                       'par': parent, 'length': length, 'row': 0, 'id': devid, 'drv': drv }
                if htmlclass:
                        list[name]['htmlclass'] = htmlclass
                if color:
                        list[name]['color'] = color
                return name
-       def deviceIDs(self, devlist, phase):
-               idlist = []
-               list = self.dmesg[phase]['list']
-               for devname in list:
-                       if devname in devlist:
-                               idlist.append(list[devname]['id'])
-               return idlist
-       def deviceParentID(self, devname, phase):
-               pdev = ''
-               pdevid = ''
-               list = self.dmesg[phase]['list']
-               if devname in list:
-                       pdev = list[devname]['par']
-               if pdev in list:
-                       return list[pdev]['id']
-               return pdev
        def deviceChildren(self, devname, phase):
                devlist = []
                list = self.dmesg[phase]['list']
@@ -1015,21 +1126,15 @@ class Data:
                        if(list[child]['par'] == devname):
                                devlist.append(child)
                return devlist
-       def deviceDescendants(self, devname, phase):
-               children = self.deviceChildren(devname, phase)
-               family = children
-               for child in children:
-                       family += self.deviceDescendants(child, phase)
-               return family
-       def deviceChildrenIDs(self, devname, phase):
-               devlist = self.deviceChildren(devname, phase)
-               return self.deviceIDs(devlist, phase)
        def printDetails(self):
+               vprint('Timeline Details:')
                vprint('          test start: %f' % self.start)
+               vprint('kernel suspend start: %f' % self.tKernSus)
                for phase in self.phases:
                        dc = len(self.dmesg[phase]['list'])
                        vprint('    %16s: %f - %f (%d devices)' % (phase, \
                                self.dmesg[phase]['start'], self.dmesg[phase]['end'], dc))
+               vprint('   kernel resume end: %f' % self.tKernRes)
                vprint('            test end: %f' % self.end)
        def deviceChildrenAllPhases(self, devname):
                devlist = []
@@ -1108,21 +1213,134 @@ class Data:
                                if width != '0.000000' and length >= mindevlen:
                                        devlist.append(dev)
                        self.tdevlist[phase] = devlist
-
-# Class: TraceEvent
+       def addHorizontalDivider(self, devname, devend):
+               phase = 'suspend_prepare'
+               self.newAction(phase, devname, -2, '', \
+                       self.start, devend, '', ' sec', '')
+               if phase not in self.tdevlist:
+                       self.tdevlist[phase] = []
+               self.tdevlist[phase].append(devname)
+               d = DevItem(0, phase, self.dmesg[phase]['list'][devname])
+               return d
+       def addProcessUsageEvent(self, name, times):
+               # get the start and end times for this process
+               maxC = 0
+               tlast = 0
+               start = -1
+               end = -1
+               for t in sorted(times):
+                       if tlast == 0:
+                               tlast = t
+                               continue
+                       if name in self.pstl[t]:
+                               if start == -1 or tlast < start:
+                                       start = tlast
+                               if end == -1 or t > end:
+                                       end = t
+                       tlast = t
+               if start == -1 or end == -1:
+                       return 0
+               # add a new action for this process and get the object
+               out = self.newActionGlobal(name, start, end, -3)
+               if not out:
+                       return 0
+               phase, devname = out
+               dev = self.dmesg[phase]['list'][devname]
+               # get the cpu exec data
+               tlast = 0
+               clast = 0
+               cpuexec = dict()
+               for t in sorted(times):
+                       if tlast == 0 or t <= start or t > end:
+                               tlast = t
+                               continue
+                       list = self.pstl[t]
+                       c = 0
+                       if name in list:
+                               c = list[name]
+                       if c > maxC:
+                               maxC = c
+                       if c != clast:
+                               key = (tlast, t)
+                               cpuexec[key] = c
+                               tlast = t
+                               clast = c
+               dev['cpuexec'] = cpuexec
+               return maxC
+       def createProcessUsageEvents(self):
+               # get an array of process names
+               proclist = []
+               for t in self.pstl:
+                       pslist = self.pstl[t]
+                       for ps in pslist:
+                               if ps not in proclist:
+                                       proclist.append(ps)
+               # get a list of data points for suspend and resume
+               tsus = []
+               tres = []
+               for t in sorted(self.pstl):
+                       if t < self.tSuspended:
+                               tsus.append(t)
+                       else:
+                               tres.append(t)
+               # process the events for suspend and resume
+               if len(proclist) > 0:
+                       vprint('Process Execution:')
+               for ps in proclist:
+                       c = self.addProcessUsageEvent(ps, tsus)
+                       if c > 0:
+                               vprint('%25s (sus): %d' % (ps, c))
+                       c = self.addProcessUsageEvent(ps, tres)
+                       if c > 0:
+                               vprint('%25s (res): %d' % (ps, c))
+
+# Class: DevFunction
 # Description:
-#       A container for trace event data found in the ftrace file
-class TraceEvent:
-       text = ''
-       time = 0.0
-       length = 0.0
-       title = ''
+#       A container for kprobe function data we want in the dev timeline
+class DevFunction:
        row = 0
-       def __init__(self, a, n, t, l):
-               self.title = a
-               self.text = n
-               self.time = t
-               self.length = l
+       count = 1
+       def __init__(self, name, args, caller, ret, start, end, u, proc, pid, color):
+               self.name = name
+               self.args = args
+               self.caller = caller
+               self.ret = ret
+               self.time = start
+               self.length = end - start
+               self.end = end
+               self.ubiquitous = u
+               self.proc = proc
+               self.pid = pid
+               self.color = color
+       def title(self):
+               cnt = ''
+               if self.count > 1:
+                       cnt = '(x%d)' % self.count
+               l = '%0.3fms' % (self.length * 1000)
+               if self.ubiquitous:
+                       title = '%s(%s)%s <- %s, %s(%s)' % \
+                               (self.name, self.args, cnt, self.caller, self.ret, l)
+               else:
+                       title = '%s(%s) %s%s(%s)' % (self.name, self.args, self.ret, cnt, l)
+               return title.replace('"', '')
+       def text(self):
+               if self.count > 1:
+                       text = '%s(x%d)' % (self.name, self.count)
+               else:
+                       text = self.name
+               return text
+       def repeat(self, tgt):
+               # is the tgt call just a repeat of this call (e.g. are we in a loop)
+               dt = self.time - tgt.end
+               # only combine calls if -all- attributes are identical
+               if tgt.caller == self.caller and \
+                       tgt.name == self.name and tgt.args == self.args and \
+                       tgt.proc == self.proc and tgt.pid == self.pid and \
+                       tgt.ret == self.ret and dt >= 0 and \
+                       dt <= sysvals.callloopmaxgap and \
+                       self.length < sysvals.callloopmaxlen:
+                       return True
+               return False
 
 # Class: FTraceLine
 # Description:
@@ -1226,7 +1444,6 @@ class FTraceLine:
                        print('%s -- %f (%02d): %s() { (%.3f us)' % (dev, self.time, \
                                self.depth, self.name, self.length*1000000))
        def startMarker(self):
-               global sysvals
                # Is this the starting line of a suspend?
                if not self.fevent:
                        return False
@@ -1506,6 +1723,16 @@ class FTraceCallGraph:
                                        l.depth, l.name, l.length*1000000))
                print(' ')
 
+class DevItem:
+       def __init__(self, test, phase, dev):
+               self.test = test
+               self.phase = phase
+               self.dev = dev
+       def isa(self, cls):
+               if 'htmlclass' in self.dev and cls in self.dev['htmlclass']:
+                       return True
+               return False
+
 # Class: Timeline
 # Description:
 #       A container for a device timeline which calculates
@@ -1517,12 +1744,11 @@ class Timeline:
        rowH = 30       # device row height
        bodyH = 0       # body height
        rows = 0        # total timeline rows
-       phases = []
-       rowmaxlines = dict()
-       rowcount = dict()
+       rowlines = dict()
        rowheight = dict()
-       def __init__(self, rowheight):
+       def __init__(self, rowheight, scaleheight):
                self.rowH = rowheight
+               self.scaleH = scaleheight
                self.html = {
                        'header': '',
                        'timeline': '',
@@ -1537,21 +1763,19 @@ class Timeline:
        #        The total number of rows needed to display this phase of the timeline
        def getDeviceRows(self, rawlist):
                # clear all rows and set them to undefined
-               lendict = dict()
+               sortdict = dict()
                for item in rawlist:
                        item.row = -1
-                       lendict[item] = item.length
-               list = []
-               for i in sorted(lendict, key=lendict.get, reverse=True):
-                       list.append(i)
-               remaining = len(list)
+                       sortdict[item] = item.length
+               sortlist = sorted(sortdict, key=sortdict.get, reverse=True)
+               remaining = len(sortlist)
                rowdata = dict()
                row = 1
                # try to pack each row with as many ranges as possible
                while(remaining > 0):
                        if(row not in rowdata):
                                rowdata[row] = []
-                       for i in list:
+                       for i in sortlist:
                                if(i.row >= 0):
                                        continue
                                s = i.time
@@ -1575,81 +1799,86 @@ class Timeline:
        #        Organize the timeline entries into the smallest
        #        number of rows possible, with no entry overlapping
        # Arguments:
-       #        list: the list of devices/actions for a single phase
-       #        devlist: string list of device names to use
+       #        devlist: the list of devices/actions in a group of contiguous phases
        # Output:
        #        The total number of rows needed to display this phase of the timeline
-       def getPhaseRows(self, dmesg, devlist):
+       def getPhaseRows(self, devlist, row=0):
                # clear all rows and set them to undefined
                remaining = len(devlist)
                rowdata = dict()
-               row = 0
-               lendict = dict()
+               sortdict = dict()
                myphases = []
+               # initialize all device rows to -1 and calculate devrows
                for item in devlist:
-                       if item[0] not in self.phases:
-                               self.phases.append(item[0])
-                       if item[0] not in myphases:
-                               myphases.append(item[0])
-                               self.rowmaxlines[item[0]] = dict()
-                               self.rowheight[item[0]] = dict()
-                       dev = dmesg[item[0]]['list'][item[1]]
+                       dev = item.dev
+                       tp = (item.test, item.phase)
+                       if tp not in myphases:
+                               myphases.append(tp)
                        dev['row'] = -1
-                       lendict[item] = float(dev['end']) - float(dev['start'])
+                       # sort by length 1st, then name 2nd
+                       sortdict[item] = (float(dev['end']) - float(dev['start']), item.dev['name'])
                        if 'src' in dev:
                                dev['devrows'] = self.getDeviceRows(dev['src'])
-               lenlist = []
-               for i in sorted(lendict, key=lendict.get, reverse=True):
-                       lenlist.append(i)
+               # sort the devlist by length so that large items graph on top
+               sortlist = sorted(sortdict, key=sortdict.get, reverse=True)
                orderedlist = []
-               for item in lenlist:
-                       dev = dmesg[item[0]]['list'][item[1]]
-                       if dev['pid'] == -2:
+               for item in sortlist:
+                       if item.dev['pid'] == -2:
                                orderedlist.append(item)
-               for item in lenlist:
+               for item in sortlist:
                        if item not in orderedlist:
                                orderedlist.append(item)
-               # try to pack each row with as many ranges as possible
+               # try to pack each row with as many devices as possible
                while(remaining > 0):
                        rowheight = 1
                        if(row not in rowdata):
                                rowdata[row] = []
                        for item in orderedlist:
-                               dev = dmesg[item[0]]['list'][item[1]]
+                               dev = item.dev
                                if(dev['row'] < 0):
                                        s = dev['start']
                                        e = dev['end']
                                        valid = True
                                        for ritem in rowdata[row]:
-                                               rs = ritem['start']
-                                               re = ritem['end']
+                                               rs = ritem.dev['start']
+                                               re = ritem.dev['end']
                                                if(not (((s <= rs) and (e <= rs)) or
                                                        ((s >= re) and (e >= re)))):
                                                        valid = False
                                                        break
                                        if(valid):
-                                               rowdata[row].append(dev)
+                                               rowdata[row].append(item)
                                                dev['row'] = row
                                                remaining -= 1
                                                if 'devrows' in dev and dev['devrows'] > rowheight:
                                                        rowheight = dev['devrows']
-                       for phase in myphases:
-                               self.rowmaxlines[phase][row] = rowheight
-                               self.rowheight[phase][row] = rowheight * self.rowH
+                       for t, p in myphases:
+                               if t not in self.rowlines or t not in self.rowheight:
+                                       self.rowlines[t] = dict()
+                                       self.rowheight[t] = dict()
+                               if p not in self.rowlines[t] or p not in self.rowheight[t]:
+                                       self.rowlines[t][p] = dict()
+                                       self.rowheight[t][p] = dict()
+                               rh = self.rowH
+                               # section headers should use a different row height
+                               if len(rowdata[row]) == 1 and \
+                                       'htmlclass' in rowdata[row][0].dev and \
+                                       'sec' in rowdata[row][0].dev['htmlclass']:
+                                       rh = 15
+                               self.rowlines[t][p][row] = rowheight
+                               self.rowheight[t][p][row] = rowheight * rh
                        row += 1
                if(row > self.rows):
                        self.rows = int(row)
-               for phase in myphases:
-                       self.rowcount[phase] = row
                return row
-       def phaseRowHeight(self, phase, row):
-               return self.rowheight[phase][row]
-       def phaseRowTop(self, phase, row):
+       def phaseRowHeight(self, test, phase, row):
+               return self.rowheight[test][phase][row]
+       def phaseRowTop(self, test, phase, row):
                top = 0
-               for i in sorted(self.rowheight[phase]):
+               for i in sorted(self.rowheight[test][phase]):
                        if i >= row:
                                break
-                       top += self.rowheight[phase][i]
+                       top += self.rowheight[test][phase][i]
                return top
        # Function: calcTotalRows
        # Description:
@@ -1657,19 +1886,21 @@ class Timeline:
        def calcTotalRows(self):
                maxrows = 0
                standardphases = []
-               for phase in self.phases:
-                       total = 0
-                       for i in sorted(self.rowmaxlines[phase]):
-                               total += self.rowmaxlines[phase][i]
-                       if total > maxrows:
-                               maxrows = total
-                       if total == self.rowcount[phase]:
-                               standardphases.append(phase)
+               for t in self.rowlines:
+                       for p in self.rowlines[t]:
+                               total = 0
+                               for i in sorted(self.rowlines[t][p]):
+                                       total += self.rowlines[t][p][i]
+                               if total > maxrows:
+                                       maxrows = total
+                               if total == len(self.rowlines[t][p]):
+                                       standardphases.append((t, p))
                self.height = self.scaleH + (maxrows*self.rowH)
                self.bodyH = self.height - self.scaleH
-               for phase in standardphases:
-                       for i in sorted(self.rowheight[phase]):
-                               self.rowheight[phase][i] = self.bodyH/self.rowcount[phase]
+               # if there is 1 line per row, draw them the standard way
+               for t, p in standardphases:
+                       for i in sorted(self.rowheight[t][p]):
+                               self.rowheight[t][p][i] = self.bodyH/len(self.rowlines[t][p])
        # Function: createTimeScale
        # Description:
        #        Create the timescale for a timeline block
@@ -1716,7 +1947,6 @@ class Timeline:
 #       A list of values describing the properties of these test runs
 class TestProps:
        stamp = ''
-       tracertype = ''
        S0i3 = False
        fwdata = []
        ftrace_line_fmt_fg = \
@@ -1734,14 +1964,13 @@ class TestProps:
        def __init__(self):
                self.ktemp = dict()
        def setTracerType(self, tracer):
-               self.tracertype = tracer
                if(tracer == 'function_graph'):
                        self.cgformat = True
                        self.ftrace_line_fmt = self.ftrace_line_fmt_fg
                elif(tracer == 'nop'):
                        self.ftrace_line_fmt = self.ftrace_line_fmt_nop
                else:
-                       doError('Invalid tracer format: [%s]' % tracer, False)
+                       doError('Invalid tracer format: [%s]' % tracer)
 
 # Class: TestRun
 # Description:
@@ -1756,6 +1985,51 @@ class TestRun:
                self.ftemp = dict()
                self.ttemp = dict()
 
+class ProcessMonitor:
+       proclist = dict()
+       running = False
+       def procstat(self):
+               c = ['cat /proc/[1-9]*/stat 2>/dev/null']
+               process = Popen(c, shell=True, stdout=PIPE)
+               running = dict()
+               for line in process.stdout:
+                       data = line.split()
+                       pid = data[0]
+                       name = re.sub('[()]', '', data[1])
+                       user = int(data[13])
+                       kern = int(data[14])
+                       kjiff = ujiff = 0
+                       if pid not in self.proclist:
+                               self.proclist[pid] = {'name' : name, 'user' : user, 'kern' : kern}
+                       else:
+                               val = self.proclist[pid]
+                               ujiff = user - val['user']
+                               kjiff = kern - val['kern']
+                               val['user'] = user
+                               val['kern'] = kern
+                       if ujiff > 0 or kjiff > 0:
+                               running[pid] = ujiff + kjiff
+               result = process.wait()
+               out = ''
+               for pid in running:
+                       jiffies = running[pid]
+                       val = self.proclist[pid]
+                       if out:
+                               out += ','
+                       out += '%s-%s %d' % (val['name'], pid, jiffies)
+               return 'ps - '+out
+       def processMonitor(self, tid):
+               while self.running:
+                       out = self.procstat()
+                       if out:
+                               sysvals.fsetVal(out, 'trace_marker')
+       def start(self):
+               self.thread = Thread(target=self.processMonitor, args=(0,))
+               self.running = True
+               self.thread.start()
+       def stop(self):
+               self.running = False
+
 # ----------------- FUNCTIONS --------------------
 
 # Function: vprint
@@ -1764,7 +2038,7 @@ class TestRun:
 # Arguments:
 #       msg: the debug/log message to print
 def vprint(msg):
-       global sysvals
+       sysvals.logmsg += msg+'\n'
        if(sysvals.verbose):
                print(msg)
 
@@ -1775,8 +2049,6 @@ def vprint(msg):
 # Arguments:
 #       m: the valid re.match output for the stamp line
 def parseStamp(line, data):
-       global sysvals
-
        m = re.match(sysvals.stampfmt, line)
        data.stamp = {'time': '', 'host': '', 'mode': ''}
        dt = datetime(int(m.group('y'))+2000, int(m.group('m')),
@@ -1788,6 +2060,14 @@ def parseStamp(line, data):
        data.stamp['kernel'] = m.group('kernel')
        sysvals.hostname = data.stamp['host']
        sysvals.suspendmode = data.stamp['mode']
+       if sysvals.suspendmode == 'command' and sysvals.ftracefile != '':
+               modes = ['on', 'freeze', 'standby', 'mem']
+               out = Popen(['grep', 'suspend_enter', sysvals.ftracefile],
+                       stderr=PIPE, stdout=PIPE).stdout.read()
+               m = re.match('.* suspend_enter\[(?P<mode>.*)\]', out)
+               if m and m.group('mode') in ['1', '2', '3']:
+                       sysvals.suspendmode = modes[int(m.group('mode'))]
+                       data.stamp['mode'] = sysvals.suspendmode
        if not sysvals.stamp:
                sysvals.stamp = data.stamp
 
@@ -1817,18 +2097,17 @@ def diffStamp(stamp1, stamp2):
 #       required for primary parsing. Set the usetraceevents and/or
 #       usetraceeventsonly flags in the global sysvals object
 def doesTraceLogHaveTraceEvents():
-       global sysvals
-
        # check for kprobes
        sysvals.usekprobes = False
-       out = os.system('grep -q "_cal: (" '+sysvals.ftracefile)
+       out = call('grep -q "_cal: (" '+sysvals.ftracefile, shell=True)
        if(out == 0):
                sysvals.usekprobes = True
        # check for callgraph data on trace event blocks
-       out = os.system('grep -q "_cpu_down()" '+sysvals.ftracefile)
+       out = call('grep -q "_cpu_down()" '+sysvals.ftracefile, shell=True)
        if(out == 0):
                sysvals.usekprobes = True
-       out = os.popen('head -1 '+sysvals.ftracefile).read().replace('\n', '')
+       out = Popen(['head', '-1', sysvals.ftracefile],
+               stderr=PIPE, stdout=PIPE).stdout.read().replace('\n', '')
        m = re.match(sysvals.stampfmt, out)
        if m and m.group('mode') == 'command':
                sysvals.usetraceeventsonly = True
@@ -1838,14 +2117,14 @@ def doesTraceLogHaveTraceEvents():
        sysvals.usetraceeventsonly = True
        sysvals.usetraceevents = False
        for e in sysvals.traceevents:
-               out = os.system('grep -q "'+e+': " '+sysvals.ftracefile)
+               out = call('grep -q "'+e+': " '+sysvals.ftracefile, shell=True)
                if(out != 0):
                        sysvals.usetraceeventsonly = False
                if(e == 'suspend_resume' and out == 0):
                        sysvals.usetraceevents = True
        # determine is this log is properly formatted
        for e in ['SUSPEND START', 'RESUME COMPLETE']:
-               out = os.system('grep -q "'+e+'" '+sysvals.ftracefile)
+               out = call('grep -q "'+e+'" '+sysvals.ftracefile, shell=True)
                if(out != 0):
                        sysvals.usetracemarkers = False
 
@@ -1860,8 +2139,6 @@ def doesTraceLogHaveTraceEvents():
 # Arguments:
 #       testruns: the array of Data objects obtained from parseKernelLog
 def appendIncompleteTraceLog(testruns):
-       global sysvals
-
        # create TestRun vessels for ftrace parsing
        testcnt = len(testruns)
        testidx = 0
@@ -2052,8 +2329,7 @@ def appendIncompleteTraceLog(testruns):
                                                                dev['ftrace'] = cg
                                                break
 
-               if(sysvals.verbose):
-                       test.data.printDetails()
+               test.data.printDetails()
 
 # Function: parseTraceLog
 # Description:
@@ -2064,14 +2340,12 @@ def appendIncompleteTraceLog(testruns):
 # Output:
 #       An array of Data objects
 def parseTraceLog():
-       global sysvals
-
        vprint('Analyzing the ftrace data...')
        if(os.path.exists(sysvals.ftracefile) == False):
-               doError('%s does not exist' % sysvals.ftracefile, False)
+               doError('%s does not exist' % sysvals.ftracefile)
 
        sysvals.setupAllKprobes()
-       tracewatch = ['suspend_enter']
+       tracewatch = []
        if sysvals.usekprobes:
                tracewatch += ['sync_filesystems', 'freeze_processes', 'syscore_suspend',
                        'syscore_resume', 'resume_console', 'thaw_processes', 'CPU_ON', 'CPU_OFF']
@@ -2102,17 +2376,13 @@ def parseTraceLog():
                if(m):
                        tp.setTracerType(m.group('t'))
                        continue
-               # post resume time line: did this test run include post-resume data
-               m = re.match(sysvals.postresumefmt, line)
-               if(m):
-                       t = int(m.group('t'))
-                       if(t > 0):
-                               sysvals.postresumetime = t
-                       continue
                # device properties line
                if(re.match(sysvals.devpropfmt, line)):
                        devProps(line)
                        continue
+               # ignore all other commented lines
+               if line[0] == '#':
+                       continue
                # ftrace line: parse only valid lines
                m = re.match(tp.ftrace_line_fmt, line)
                if(not m):
@@ -2142,20 +2412,36 @@ def parseTraceLog():
                        testrun = TestRun(data)
                        testruns.append(testrun)
                        parseStamp(tp.stamp, data)
-                       if len(tp.fwdata) > data.testnumber:
-                               data.fwSuspend, data.fwResume = tp.fwdata[data.testnumber]
-                               if(data.fwSuspend > 0 or data.fwResume > 0):
-                                       data.fwValid = True
                        data.setStart(t.time)
+                       data.tKernSus = t.time
                        continue
                if(not data):
                        continue
+               # process cpu exec line
+               if t.type == 'tracing_mark_write':
+                       m = re.match(sysvals.procexecfmt, t.name)
+                       if(m):
+                               proclist = dict()
+                               for ps in m.group('ps').split(','):
+                                       val = ps.split()
+                                       if not val:
+                                               continue
+                                       name = val[0].replace('--', '-')
+                                       proclist[name] = int(val[1])
+                               data.pstl[t.time] = proclist
+                               continue
                # find the end of resume
                if(t.endMarker()):
-                       if(sysvals.usetracemarkers and sysvals.postresumetime > 0):
-                               phase = 'post_resume'
-                               data.newPhase(phase, t.time, t.time, '#F0F0F0', -1)
                        data.setEnd(t.time)
+                       if data.tKernRes == 0.0:
+                               data.tKernRes = t.time
+                       if data.dmesg['resume_complete']['end'] < 0:
+                               data.dmesg['resume_complete']['end'] = t.time
+                       if sysvals.suspendmode == 'mem' and len(tp.fwdata) > data.testnumber:
+                               data.fwSuspend, data.fwResume = tp.fwdata[data.testnumber]
+                               if(data.tSuspended != 0 and data.tResumed != 0 and \
+                                       (data.fwSuspend > 0 or data.fwResume > 0)):
+                                       data.fwValid = True
                        if(not sysvals.usetracemarkers):
                                # no trace markers? then quit and be sure to finish recording
                                # the event we used to trigger resume end
@@ -2190,8 +2476,14 @@ def parseTraceLog():
                                if(name.split('[')[0] in tracewatch):
                                        continue
                                # -- phase changes --
+                               # start of kernel suspend
+                               if(re.match('suspend_enter\[.*', t.name)):
+                                       if(isbegin):
+                                               data.dmesg[phase]['start'] = t.time
+                                               data.tKernSus = t.time
+                                       continue
                                # suspend_prepare start
-                               if(re.match('dpm_prepare\[.*', t.name)):
+                               elif(re.match('dpm_prepare\[.*', t.name)):
                                        phase = 'suspend_prepare'
                                        if(not isbegin):
                                                data.dmesg[phase]['end'] = t.time
@@ -2291,6 +2583,8 @@ def parseTraceLog():
                                p = m.group('p')
                                if(n and p):
                                        data.newAction(phase, n, pid, p, t.time, -1, drv)
+                                       if pid not in data.devpids:
+                                               data.devpids.append(pid)
                        # device callback finish
                        elif(t.type == 'device_pm_callback_end'):
                                m = re.match('(?P<drv>.*) (?P<d>.*), err.*', t.name);
@@ -2332,6 +2626,12 @@ def parseTraceLog():
                                else:
                                        e['end'] = t.time
                                        e['rdata'] = kprobedata
+                               # end of kernel resume
+                               if(kprobename == 'pm_notifier_call_chain' or \
+                                       kprobename == 'pm_restore_console'):
+                                       data.dmesg[phase]['end'] = t.time
+                                       data.tKernRes = t.time
+
                # callgraph processing
                elif sysvals.usecallgraph:
                        # create a callgraph object for the data
@@ -2348,24 +2648,37 @@ def parseTraceLog():
        if sysvals.suspendmode == 'command':
                for test in testruns:
                        for p in test.data.phases:
-                               if p == 'resume_complete':
+                               if p == 'suspend_prepare':
                                        test.data.dmesg[p]['start'] = test.data.start
                                        test.data.dmesg[p]['end'] = test.data.end
                                else:
-                                       test.data.dmesg[p]['start'] = test.data.start
-                                       test.data.dmesg[p]['end'] = test.data.start
-                       test.data.tSuspended = test.data.start
-                       test.data.tResumed = test.data.start
+                                       test.data.dmesg[p]['start'] = test.data.end
+                                       test.data.dmesg[p]['end'] = test.data.end
+                       test.data.tSuspended = test.data.end
+                       test.data.tResumed = test.data.end
                        test.data.tLow = 0
                        test.data.fwValid = False
 
-       for test in testruns:
+       # dev source and procmon events can be unreadable with mixed phase height
+       if sysvals.usedevsrc or sysvals.useprocmon:
+               sysvals.mixedphaseheight = False
+
+       for i in range(len(testruns)):
+               test = testruns[i]
+               data = test.data
+               # find the total time range for this test (begin, end)
+               tlb, tle = data.start, data.end
+               if i < len(testruns) - 1:
+                       tle = testruns[i+1].data.start
+               # add the process usage data to the timeline
+               if sysvals.useprocmon:
+                       data.createProcessUsageEvents()
                # add the traceevent data to the device hierarchy
                if(sysvals.usetraceevents):
                        # add actual trace funcs
                        for name in test.ttemp:
                                for event in test.ttemp[name]:
-                                       test.data.newActionGlobal(name, event['begin'], event['end'], event['pid'])
+                                       data.newActionGlobal(name, event['begin'], event['end'], event['pid'])
                        # add the kprobe based virtual tracefuncs as actual devices
                        for key in tp.ktemp:
                                name, pid = key
@@ -2373,24 +2686,20 @@ def parseTraceLog():
                                        continue
                                for e in tp.ktemp[key]:
                                        kb, ke = e['begin'], e['end']
-                                       if kb == ke or not test.data.isInsideTimeline(kb, ke):
+                                       if kb == ke or tlb > kb or tle <= kb:
                                                continue
-                                       test.data.newActionGlobal(e['name'], kb, ke, pid)
+                                       color = sysvals.kprobeColor(name)
+                                       data.newActionGlobal(e['name'], kb, ke, pid, color)
                        # add config base kprobes and dev kprobes
-                       for key in tp.ktemp:
-                               name, pid = key
-                               if name in sysvals.tracefuncs:
-                                       continue
-                               for e in tp.ktemp[key]:
-                                       kb, ke = e['begin'], e['end']
-                                       if kb == ke or not test.data.isInsideTimeline(kb, ke):
+                       if sysvals.usedevsrc:
+                               for key in tp.ktemp:
+                                       name, pid = key
+                                       if name in sysvals.tracefuncs or name not in sysvals.dev_tracefuncs:
                                                continue
-                                       color = sysvals.kprobeColor(e['name'])
-                                       if name not in sysvals.dev_tracefuncs:
-                                               # config base kprobe
-                                               test.data.newActionGlobal(e['name'], kb, ke, -2, color)
-                                       elif sysvals.usedevsrc:
-                                               # dev kprobe
+                                       for e in tp.ktemp[key]:
+                                               kb, ke = e['begin'], e['end']
+                                               if kb == ke or tlb > kb or tle <= kb:
+                                                       continue
                                                data.addDeviceFunctionCall(e['name'], name, e['proc'], pid, kb,
                                                        ke, e['cdata'], e['rdata'])
                if sysvals.usecallgraph:
@@ -2407,7 +2716,7 @@ def parseTraceLog():
                                                        id+', ignoring this callback')
                                                continue
                                        # match cg data to devices
-                                       if sysvals.suspendmode == 'command' or not cg.deviceMatch(pid, test.data):
+                                       if sysvals.suspendmode == 'command' or not cg.deviceMatch(pid, data):
                                                sortkey = '%f%f%d' % (cg.start, cg.end, pid)
                                                sortlist[sortkey] = cg
                        # create blocks for orphan cg data
@@ -2416,12 +2725,11 @@ def parseTraceLog():
                                name = cg.list[0].name
                                if sysvals.isCallgraphFunc(name):
                                        vprint('Callgraph found for task %d: %.3fms, %s' % (cg.pid, (cg.end - cg.start)*1000, name))
-                                       cg.newActionFromFunction(test.data)
+                                       cg.newActionFromFunction(data)
 
        if sysvals.suspendmode == 'command':
-               if(sysvals.verbose):
-                       for data in testdata:
-                               data.printDetails()
+               for data in testdata:
+                       data.printDetails()
                return testdata
 
        # fill in any missing phases
@@ -2429,7 +2737,7 @@ def parseTraceLog():
                lp = data.phases[0]
                for p in data.phases:
                        if(data.dmesg[p]['start'] < 0 and data.dmesg[p]['end'] < 0):
-                               print('WARNING: phase "%s" is missing!' % p)
+                               vprint('WARNING: phase "%s" is missing!' % p)
                        if(data.dmesg[p]['start'] < 0):
                                data.dmesg[p]['start'] = data.dmesg[lp]['end']
                                if(p == 'resume_machine'):
@@ -2438,60 +2746,27 @@ def parseTraceLog():
                                        data.tLow = 0
                        if(data.dmesg[p]['end'] < 0):
                                data.dmesg[p]['end'] = data.dmesg[p]['start']
+                       if(p != lp and not ('machine' in p and 'machine' in lp)):
+                               data.dmesg[lp]['end'] = data.dmesg[p]['start']
                        lp = p
 
                if(len(sysvals.devicefilter) > 0):
                        data.deviceFilter(sysvals.devicefilter)
                data.fixupInitcallsThatDidntReturn()
-               if(sysvals.verbose):
-                       data.printDetails()
+               if sysvals.usedevsrc:
+                       data.optimizeDevSrc()
+               data.printDetails()
 
+       # x2: merge any overlapping devices between test runs
+       if sysvals.usedevsrc and len(testdata) > 1:
+               tc = len(testdata)
+               for i in range(tc - 1):
+                       devlist = testdata[i].overflowDevices()
+                       for j in range(i + 1, tc):
+                               testdata[j].mergeOverlapDevices(devlist)
+               testdata[0].stitchTouchingThreads(testdata[1:])
        return testdata
 
-# Function: loadRawKernelLog
-# Description:
-#       Load a raw kernel log that wasn't created by this tool, it might be
-#       possible to extract a valid suspend/resume log
-def loadRawKernelLog(dmesgfile):
-       global sysvals
-
-       stamp = {'time': '', 'host': '', 'mode': 'mem', 'kernel': ''}
-       stamp['time'] = datetime.now().strftime('%B %d %Y, %I:%M:%S %p')
-       stamp['host'] = sysvals.hostname
-
-       testruns = []
-       data = 0
-       lf = open(dmesgfile, 'r')
-       for line in lf:
-               line = line.replace('\r\n', '')
-               idx = line.find('[')
-               if idx > 1:
-                       line = line[idx:]
-               m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
-               if(not m):
-                       continue
-               msg = m.group("msg")
-               m = re.match('PM: Syncing filesystems.*', msg)
-               if(m):
-                       if(data):
-                               testruns.append(data)
-                       data = Data(len(testruns))
-                       data.stamp = stamp
-               if(data):
-                       m = re.match('.* *(?P<k>[0-9]\.[0-9]{2}\.[0-9]-.*) .*', msg)
-                       if(m):
-                               stamp['kernel'] = m.group('k')
-                       m = re.match('PM: Preparing system for (?P<m>.*) sleep', msg)
-                       if(m):
-                               stamp['mode'] = m.group('m')
-                       data.dmesgtext.append(line)
-       if(data):
-               testruns.append(data)
-               sysvals.stamp = stamp
-               sysvals.suspendmode = stamp['mode']
-       lf.close()
-       return testruns
-
 # Function: loadKernelLog
 # Description:
 #       [deprecated for kernel 3.15.0 or newer]
@@ -2499,15 +2774,16 @@ def loadRawKernelLog(dmesgfile):
 #       The dmesg filename is taken from sysvals
 # Output:
 #       An array of empty Data objects with only their dmesgtext attributes set
-def loadKernelLog():
-       global sysvals
-
+def loadKernelLog(justtext=False):
        vprint('Analyzing the dmesg data...')
        if(os.path.exists(sysvals.dmesgfile) == False):
-               doError('%s does not exist' % sysvals.dmesgfile, False)
+               doError('%s does not exist' % sysvals.dmesgfile)
 
+       if justtext:
+               dmesgtext = []
        # there can be multiple test runs in a single file
        tp = TestProps()
+       tp.stamp = datetime.now().strftime('# suspend-%m%d%y-%H%M%S localhost mem unknown')
        testruns = []
        data = 0
        lf = open(sysvals.dmesgfile, 'r')
@@ -2528,6 +2804,9 @@ def loadKernelLog():
                if(not m):
                        continue
                msg = m.group("msg")
+               if justtext:
+                       dmesgtext.append(line)
+                       continue
                if(re.match('PM: Syncing filesystems.*', msg)):
                        if(data):
                                testruns.append(data)
@@ -2537,24 +2816,24 @@ def loadKernelLog():
                                data.fwSuspend, data.fwResume = tp.fwdata[data.testnumber]
                                if(data.fwSuspend > 0 or data.fwResume > 0):
                                        data.fwValid = True
-               if(re.match('ACPI: resume from mwait', msg)):
-                       print('NOTE: This suspend appears to be freeze rather than'+\
-                               ' %s, it will be treated as such' % sysvals.suspendmode)
-                       sysvals.suspendmode = 'freeze'
                if(not data):
                        continue
+               m = re.match('.* *(?P<k>[0-9]\.[0-9]{2}\.[0-9]-.*) .*', msg)
+               if(m):
+                       sysvals.stamp['kernel'] = m.group('k')
+               m = re.match('PM: Preparing system for (?P<m>.*) sleep', msg)
+               if(m):
+                       sysvals.stamp['mode'] = sysvals.suspendmode = m.group('m')
                data.dmesgtext.append(line)
-       if(data):
-               testruns.append(data)
        lf.close()
 
-       if(len(testruns) < 1):
-               # bad log, but see if you can extract something meaningful anyway
-               testruns = loadRawKernelLog(sysvals.dmesgfile)
-
-       if(len(testruns) < 1):
-               doError(' dmesg log is completely unreadable: %s' \
-                       % sysvals.dmesgfile, False)
+       if justtext:
+               return dmesgtext
+       if data:
+               testruns.append(data)
+       if len(testruns) < 1:
+               doError(' dmesg log has no suspend/resume data: %s' \
+                       % sysvals.dmesgfile)
 
        # fix lines with same timestamp/function with the call and return swapped
        for data in testruns:
@@ -2586,8 +2865,6 @@ def loadKernelLog():
 # Output:
 #       The filled Data object
 def parseKernelLog(data):
-       global sysvals
-
        phase = 'suspend_runtime'
 
        if(data.fwValid):
@@ -2645,7 +2922,6 @@ def parseKernelLog(data):
        prevktime = -1.0
        actions = dict()
        for line in data.dmesgtext:
-               # -- preprocessing --
                # parse each dmesg line into the time and message
                m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
                if(m):
@@ -2653,8 +2929,6 @@ def parseKernelLog(data):
                        try:
                                ktime = float(val)
                        except:
-                               doWarning('INVALID DMESG LINE: '+\
-                                       line.replace('\n', ''), 'dmesg')
                                continue
                        msg = m.group('msg')
                        # initialize data start to first line time
@@ -2672,12 +2946,12 @@ def parseKernelLog(data):
                        phase = 'resume_noirq'
                        data.dmesg[phase]['start'] = ktime
 
-               # -- phase changes --
                # suspend start
                if(re.match(dm['suspend_prepare'], msg)):
                        phase = 'suspend_prepare'
                        data.dmesg[phase]['start'] = ktime
                        data.setStart(ktime)
+                       data.tKernSus = ktime
                # suspend start
                elif(re.match(dm['suspend'], msg)):
                        data.dmesg['suspend_prepare']['end'] = ktime
@@ -2734,7 +3008,7 @@ def parseKernelLog(data):
                elif(re.match(dm['post_resume'], msg)):
                        data.dmesg['resume_complete']['end'] = ktime
                        data.setEnd(ktime)
-                       phase = 'post_resume'
+                       data.tKernRes = ktime
                        break
 
                # -- device callbacks --
@@ -2761,7 +3035,6 @@ def parseKernelLog(data):
                                        dev['length'] = int(t)
                                        dev['end'] = ktime
 
-               # -- non-devicecallback actions --
                # if trace events are not available, these are better than nothing
                if(not sysvals.usetraceevents):
                        # look for known actions
@@ -2821,8 +3094,7 @@ def parseKernelLog(data):
                for event in actions[name]:
                        data.newActionGlobal(name, event['begin'], event['end'])
 
-       if(sysvals.verbose):
-               data.printDetails()
+       data.printDetails()
        if(len(sysvals.devicefilter) > 0):
                data.deviceFilter(sysvals.devicefilter)
        data.fixupInitcallsThatDidntReturn()
@@ -2834,8 +3106,6 @@ def parseKernelLog(data):
 # Arguments:
 #       testruns: array of Data objects from parseTraceLog
 def createHTMLSummarySimple(testruns, htmlfile):
-       global sysvals
-
        # print out the basic summary of all the tests
        hf = open(htmlfile, 'w')
 
@@ -2960,7 +3230,6 @@ def createHTMLSummarySimple(testruns, htmlfile):
        hf.close()
 
 def htmlTitle():
-       global sysvals
        modename = {
                'freeze': 'Freeze (S0)',
                'standby': 'Standby (S1)',
@@ -2993,13 +3262,14 @@ def ordinal(value):
 # Output:
 #       True if the html file was created, false if it failed
 def createHTML(testruns):
-       global sysvals
-
        if len(testruns) < 1:
                print('ERROR: Not enough test data to build a timeline')
                return
 
+       kerror = False
        for data in testruns:
+               if data.kerror:
+                       kerror = True
                data.normalizeTime(testruns[-1].tSuspended)
 
        x2changes = ['', 'absolute']
@@ -3009,53 +3279,59 @@ def createHTML(testruns):
        headline_version = '<div class="version"><a href="https://01.org/suspendresume">AnalyzeSuspend v%s</a></div>' % sysvals.version
        headline_stamp = '<div class="stamp">{0} {1} {2} {3}</div>\n'
        html_devlist1 = '<button id="devlist1" class="devlist" style="float:left;">Device Detail%s</button>' % x2changes[0]
-       html_zoombox = '<center><button id="zoomin">ZOOM IN</button><button id="zoomout">ZOOM OUT</button><button id="zoomdef">ZOOM 1:1</button></center>\n'
+       html_zoombox = '<center><button id="zoomin">ZOOM IN +</button><button id="zoomout">ZOOM OUT -</button><button id="zoomdef">ZOOM 1:1</button></center>\n'
        html_devlist2 = '<button id="devlist2" class="devlist" style="float:right;">Device Detail2</button>\n'
        html_timeline = '<div id="dmesgzoombox" class="zoombox">\n<div id="{0}" class="timeline" style="height:{1}px">\n'
-       html_tblock = '<div id="block{0}" class="tblock" style="left:{1}%;width:{2}%;">\n'
+       html_tblock = '<div id="block{0}" class="tblock" style="left:{1}%;width:{2}%;"><div class="tback" style="height:{3}px"></div>\n'
        html_device = '<div id="{0}" title="{1}" class="thread{7}" style="left:{2}%;top:{3}px;height:{4}px;width:{5}%;{8}">{6}</div>\n'
-       html_traceevent = '<div title="{0}" class="traceevent" style="left:{1}%;top:{2}px;height:{3}px;width:{4}%;line-height:{3}px;">{5}</div>\n'
+       html_error = '<div id="{1}" title="kernel error/warning" class="err" style="right:{0}%">ERROR&rarr;</div>\n'
+       html_traceevent = '<div title="{0}" class="traceevent{6}" style="left:{1}%;top:{2}px;height:{3}px;width:{4}%;line-height:{3}px;{7}">{5}</div>\n'
+       html_cpuexec = '<div class="jiffie" style="left:{0}%;top:{1}px;height:{2}px;width:{3}%;background:{4};"></div>\n'
        html_phase = '<div class="phase" style="left:{0}%;width:{1}%;top:{2}px;height:{3}px;background-color:{4}">{5}</div>\n'
-       html_phaselet = '<div id="{0}" class="phaselet" style="left:{1}%;width:{2}%;background-color:{3}"></div>\n'
+       html_phaselet = '<div id="{0}" class="phaselet" style="left:{1}%;width:{2}%;background:{3}"></div>\n'
        html_legend = '<div id="p{3}" class="square" style="left:{0}%;background-color:{1}">&nbsp;{2}</div>\n'
        html_timetotal = '<table class="time1">\n<tr>'\
-               '<td class="green">{2} Suspend Time: <b>{0} ms</b></td>'\
-               '<td class="yellow">{2} Resume Time: <b>{1} ms</b></td>'\
+               '<td class="green" title="{3}">{2} Suspend Time: <b>{0} ms</b></td>'\
+               '<td class="yellow" title="{4}">{2} Resume Time: <b>{1} ms</b></td>'\
                '</tr>\n</table>\n'
        html_timetotal2 = '<table class="time1">\n<tr>'\
-               '<td class="green">{3} Suspend Time: <b>{0} ms</b></td>'\
-               '<td class="gray">'+sysvals.suspendmode+' time: <b>{1} ms</b></td>'\
-               '<td class="yellow">{3} Resume Time: <b>{2} ms</b></td>'\
+               '<td class="green" title="{4}">{3} Suspend Time: <b>{0} ms</b></td>'\
+               '<td class="gray" title="time spent in low-power mode with clock running">'+sysvals.suspendmode+' time: <b>{1} ms</b></td>'\
+               '<td class="yellow" title="{5}">{3} Resume Time: <b>{2} ms</b></td>'\
                '</tr>\n</table>\n'
        html_timetotal3 = '<table class="time1">\n<tr>'\
                '<td class="green">Execution Time: <b>{0} ms</b></td>'\
                '<td class="yellow">Command: <b>{1}</b></td>'\
                '</tr>\n</table>\n'
        html_timegroups = '<table class="time2">\n<tr>'\
-               '<td class="green">{4}Kernel Suspend: {0} ms</td>'\
+               '<td class="green" title="time from kernel enter_state({5}) to firmware mode [kernel time only]">{4}Kernel Suspend: {0} ms</td>'\
                '<td class="purple">{4}Firmware Suspend: {1} ms</td>'\
                '<td class="purple">{4}Firmware Resume: {2} ms</td>'\
-               '<td class="yellow">{4}Kernel Resume: {3} ms</td>'\
+               '<td class="yellow" title="time from firmware mode to return from kernel enter_state({5}) [kernel time only]">{4}Kernel Resume: {3} ms</td>'\
                '</tr>\n</table>\n'
 
        # html format variables
-       rowheight = 30
-       devtextS = '14px'
-       devtextH = '30px'
-       hoverZ = 'z-index:10;'
-
+       hoverZ = 'z-index:8;'
        if sysvals.usedevsrc:
                hoverZ = ''
+       scaleH = 20
+       scaleTH = 20
+       if kerror:
+               scaleH = 40
+               scaleTH = 60
 
        # device timeline
        vprint('Creating Device Timeline...')
 
-       devtl = Timeline(rowheight)
+       devtl = Timeline(30, scaleH)
 
        # Generate the header for this timeline
        for data in testruns:
                tTotal = data.end - data.start
-               tEnd = data.dmesg['resume_complete']['end']
+               sktime = (data.dmesg['suspend_machine']['end'] - \
+                       data.tKernSus) * 1000
+               rktime = (data.dmesg['resume_complete']['end'] - \
+                       data.dmesg['resume_machine']['start']) * 1000
                if(tTotal == 0):
                        print('ERROR: No timeline data')
                        sys.exit()
@@ -3072,59 +3348,85 @@ def createHTML(testruns):
                        thtml = html_timetotal3.format(run_time, testdesc)
                        devtl.html['header'] += thtml
                elif data.fwValid:
-                       suspend_time = '%.0f'%((data.tSuspended-data.start)*1000 + \
-                               (data.fwSuspend/1000000.0))
-                       resume_time = '%.0f'%((tEnd-data.tSuspended)*1000 + \
-                               (data.fwResume/1000000.0))
+                       suspend_time = '%.0f'%(sktime + (data.fwSuspend/1000000.0))
+                       resume_time = '%.0f'%(rktime + (data.fwResume/1000000.0))
                        testdesc1 = 'Total'
                        testdesc2 = ''
+                       stitle = 'time from kernel enter_state(%s) to low-power mode [kernel & firmware time]' % sysvals.suspendmode
+                       rtitle = 'time from low-power mode to return from kernel enter_state(%s) [firmware & kernel time]' % sysvals.suspendmode
                        if(len(testruns) > 1):
                                testdesc1 = testdesc2 = ordinal(data.testnumber+1)
                                testdesc2 += ' '
                        if(data.tLow == 0):
                                thtml = html_timetotal.format(suspend_time, \
-                                       resume_time, testdesc1)
+                                       resume_time, testdesc1, stitle, rtitle)
                        else:
                                thtml = html_timetotal2.format(suspend_time, low_time, \
-                                       resume_time, testdesc1)
+                                       resume_time, testdesc1, stitle, rtitle)
                        devtl.html['header'] += thtml
-                       sktime = '%.3f'%((data.dmesg['suspend_machine']['end'] - \
-                               data.getStart())*1000)
                        sftime = '%.3f'%(data.fwSuspend / 1000000.0)
                        rftime = '%.3f'%(data.fwResume / 1000000.0)
-                       rktime = '%.3f'%((data.dmesg['resume_complete']['end'] - \
-                               data.dmesg['resume_machine']['start'])*1000)
-                       devtl.html['header'] += html_timegroups.format(sktime, \
-                               sftime, rftime, rktime, testdesc2)
+                       devtl.html['header'] += html_timegroups.format('%.3f'%sktime, \
+                               sftime, rftime, '%.3f'%rktime, testdesc2, sysvals.suspendmode)
                else:
-                       suspend_time = '%.0f'%((data.tSuspended-data.start)*1000)
-                       resume_time = '%.0f'%((tEnd-data.tSuspended)*1000)
+                       suspend_time = '%.3f' % sktime
+                       resume_time = '%.3f' % rktime
                        testdesc = 'Kernel'
+                       stitle = 'time from kernel enter_state(%s) to firmware mode [kernel time only]' % sysvals.suspendmode
+                       rtitle = 'time from firmware mode to return from kernel enter_state(%s) [kernel time only]' % sysvals.suspendmode
                        if(len(testruns) > 1):
                                testdesc = ordinal(data.testnumber+1)+' '+testdesc
                        if(data.tLow == 0):
                                thtml = html_timetotal.format(suspend_time, \
-                                       resume_time, testdesc)
+                                       resume_time, testdesc, stitle, rtitle)
                        else:
                                thtml = html_timetotal2.format(suspend_time, low_time, \
-                                       resume_time, testdesc)
+                                       resume_time, testdesc, stitle, rtitle)
                        devtl.html['header'] += thtml
 
        # time scale for potentially multiple datasets
        t0 = testruns[0].start
        tMax = testruns[-1].end
-       tSuspended = testruns[-1].tSuspended
        tTotal = tMax - t0
 
        # determine the maximum number of rows we need to draw
+       fulllist = []
+       threadlist = []
+       pscnt = 0
+       devcnt = 0
        for data in testruns:
                data.selectTimelineDevices('%f', tTotal, sysvals.mindevlen)
                for group in data.devicegroups:
                        devlist = []
                        for phase in group:
                                for devname in data.tdevlist[phase]:
-                                       devlist.append((phase,devname))
-                       devtl.getPhaseRows(data.dmesg, devlist)
+                                       d = DevItem(data.testnumber, phase, data.dmesg[phase]['list'][devname])
+                                       devlist.append(d)
+                                       if d.isa('kth'):
+                                               threadlist.append(d)
+                                       else:
+                                               if d.isa('ps'):
+                                                       pscnt += 1
+                                               else:
+                                                       devcnt += 1
+                                               fulllist.append(d)
+                       if sysvals.mixedphaseheight:
+                               devtl.getPhaseRows(devlist)
+       if not sysvals.mixedphaseheight:
+               if len(threadlist) > 0 and len(fulllist) > 0:
+                       if pscnt > 0 and devcnt > 0:
+                               msg = 'user processes & device pm callbacks'
+                       elif pscnt > 0:
+                               msg = 'user processes'
+                       else:
+                               msg = 'device pm callbacks'
+                       d = testruns[0].addHorizontalDivider(msg, testruns[-1].end)
+                       fulllist.insert(0, d)
+               devtl.getPhaseRows(fulllist)
+               if len(threadlist) > 0:
+                       d = testruns[0].addHorizontalDivider('asynchronous kernel threads', testruns[-1].end)
+                       threadlist.insert(0, d)
+                       devtl.getPhaseRows(threadlist, devtl.rows)
        devtl.calcTotalRows()
 
        # create bounding box, add buttons
@@ -3145,18 +3447,6 @@ def createHTML(testruns):
 
        # draw each test run chronologically
        for data in testruns:
-               # if nore than one test, draw a block to represent user mode
-               if(data.testnumber > 0):
-                       m0 = testruns[data.testnumber-1].end
-                       mMax = testruns[data.testnumber].start
-                       mTotal = mMax - m0
-                       name = 'usermode%d' % data.testnumber
-                       top = '%d' % devtl.scaleH
-                       left = '%f' % (((m0-t0)*100.0)/tTotal)
-                       width = '%f' % ((mTotal*100.0)/tTotal)
-                       title = 'user mode (%0.3f ms) ' % (mTotal*1000)
-                       devtl.html['timeline'] += html_device.format(name, \
-                               title, left, top, '%d'%devtl.bodyH, width, '', '', '')
                # now draw the actual timeline blocks
                for dir in phases:
                        # draw suspend and resume blocks separately
@@ -3169,13 +3459,16 @@ def createHTML(testruns):
                        else:
                                m0 = testruns[data.testnumber].tSuspended
                                mMax = testruns[data.testnumber].end
+                               # in an x2 run, remove any gap between blocks
+                               if len(testruns) > 1 and data.testnumber == 0:
+                                       mMax = testruns[1].start
                                mTotal = mMax - m0
                                left = '%f' % ((((m0-t0)*100.0)+sysvals.srgap/2)/tTotal)
                        # if a timeline block is 0 length, skip altogether
                        if mTotal == 0:
                                continue
                        width = '%f' % (((mTotal*100.0)-sysvals.srgap/2)/tTotal)
-                       devtl.html['timeline'] += html_tblock.format(bname, left, width)
+                       devtl.html['timeline'] += html_tblock.format(bname, left, width, devtl.scaleH)
                        for b in sorted(phases[dir]):
                                # draw the phase color background
                                phase = data.dmesg[b]
@@ -3185,6 +3478,12 @@ def createHTML(testruns):
                                devtl.html['timeline'] += html_phase.format(left, width, \
                                        '%.3f'%devtl.scaleH, '%.3f'%devtl.bodyH, \
                                        data.dmesg[b]['color'], '')
+                       for e in data.errorinfo[dir]:
+                               # draw red lines for any kernel errors found
+                               t, err = e
+                               right = '%f' % (((mMax-t)*100.0)/mTotal)
+                               devtl.html['timeline'] += html_error.format(right, err)
+                       for b in sorted(phases[dir]):
                                # draw the devices for this phase
                                phaselist = data.dmesg[b]['list']
                                for d in data.tdevlist[b]:
@@ -3196,46 +3495,62 @@ def createHTML(testruns):
                                        xtrastyle = ''
                                        if 'htmlclass' in dev:
                                                xtraclass = dev['htmlclass']
-                                               xtrainfo = dev['htmlclass']
                                        if 'color' in dev:
                                                xtrastyle = 'background-color:%s;' % dev['color']
                                        if(d in sysvals.devprops):
                                                name = sysvals.devprops[d].altName(d)
                                                xtraclass = sysvals.devprops[d].xtraClass()
                                                xtrainfo = sysvals.devprops[d].xtraInfo()
+                                       elif xtraclass == ' kth':
+                                               xtrainfo = ' kernel_thread'
                                        if('drv' in dev and dev['drv']):
                                                drv = ' {%s}' % dev['drv']
-                                       rowheight = devtl.phaseRowHeight(b, dev['row'])
-                                       rowtop = devtl.phaseRowTop(b, dev['row'])
+                                       rowheight = devtl.phaseRowHeight(data.testnumber, b, dev['row'])
+                                       rowtop = devtl.phaseRowTop(data.testnumber, b, dev['row'])
                                        top = '%.3f' % (rowtop + devtl.scaleH)
                                        left = '%f' % (((dev['start']-m0)*100)/mTotal)
                                        width = '%f' % (((dev['end']-dev['start'])*100)/mTotal)
                                        length = ' (%0.3f ms) ' % ((dev['end']-dev['start'])*1000)
+                                       title = name+drv+xtrainfo+length
                                        if sysvals.suspendmode == 'command':
-                                               title = name+drv+xtrainfo+length+'cmdexec'
+                                               title += sysvals.testcommand
+                                       elif xtraclass == ' ps':
+                                               if 'suspend' in b:
+                                                       title += 'pre_suspend_process'
+                                               else:
+                                                       title += 'post_resume_process'
                                        else:
-                                               title = name+drv+xtrainfo+length+b
+                                               title += b
                                        devtl.html['timeline'] += html_device.format(dev['id'], \
                                                title, left, top, '%.3f'%rowheight, width, \
                                                d+drv, xtraclass, xtrastyle)
+                                       if('cpuexec' in dev):
+                                               for t in sorted(dev['cpuexec']):
+                                                       start, end = t
+                                                       j = float(dev['cpuexec'][t]) / 5
+                                                       if j > 1.0:
+                                                               j = 1.0
+                                                       height = '%.3f' % (rowheight/3)
+                                                       top = '%.3f' % (rowtop + devtl.scaleH + 2*rowheight/3)
+                                                       left = '%f' % (((start-m0)*100)/mTotal)
+                                                       width = '%f' % ((end-start)*100/mTotal)
+                                                       color = 'rgba(255, 0, 0, %f)' % j
+                                                       devtl.html['timeline'] += \
+                                                               html_cpuexec.format(left, top, height, width, color)
                                        if('src' not in dev):
                                                continue
                                        # draw any trace events for this device
-                                       vprint('Debug trace events found for device %s' % d)
-                                       vprint('%20s %20s %10s %8s' % ('title', \
-                                               'name', 'time(ms)', 'length(ms)'))
                                        for e in dev['src']:
-                                               vprint('%20s %20s %10.3f %8.3f' % (e.title, \
-                                                       e.text, e.time*1000, e.length*1000))
-                                               height = devtl.rowH
+                                               height = '%.3f' % devtl.rowH
                                                top = '%.3f' % (rowtop + devtl.scaleH + (e.row*devtl.rowH))
                                                left = '%f' % (((e.time-m0)*100)/mTotal)
                                                width = '%f' % (e.length*100/mTotal)
-                                               color = 'rgba(204,204,204,0.5)'
+                                               xtrastyle = ''
+                                               if e.color:
+                                                       xtrastyle = 'background:%s;' % e.color
                                                devtl.html['timeline'] += \
-                                                       html_traceevent.format(e.title, \
-                                                               left, top, '%.3f'%height, \
-                                                               width, e.text)
+                                                       html_traceevent.format(e.title(), \
+                                                               left, top, height, width, e.text(), '', xtrastyle)
                        # draw the time scale, try to make the number of labels readable
                        devtl.html['timeline'] += devtl.createTimeScale(m0, mMax, tTotal, dir)
                        devtl.html['timeline'] += '</div>\n'
@@ -3284,8 +3599,7 @@ def createHTML(testruns):
                t2 {color:black;font:25px Times;}\n\
                t3 {color:black;font:20px Times;white-space:nowrap;}\n\
                t4 {color:black;font:bold 30px Times;line-height:60px;white-space:nowrap;}\n\
-               cS {color:blue;font:bold 11px Times;}\n\
-               cR {color:red;font:bold 11px Times;}\n\
+               cS {font:bold 13px Times;}\n\
                table {width:100%;}\n\
                .gray {background-color:rgba(80,80,80,0.1);}\n\
                .green {background-color:rgba(204,255,204,0.4);}\n\
@@ -3302,20 +3616,22 @@ def createHTML(testruns):
                .pf:'+cgchk+' + label {background:url(\'data:image/svg+xml;utf,<?xml version="1.0" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" height="18" width="18" version="1.1"><circle cx="9" cy="9" r="8" stroke="black" stroke-width="1" fill="white"/><rect x="4" y="8" width="10" height="2" style="fill:black;stroke-width:0"/><rect x="8" y="4" width="2" height="10" style="fill:black;stroke-width:0"/></svg>\') no-repeat left center;}\n\
                .pf:'+cgnchk+' ~ label {background:url(\'data:image/svg+xml;utf,<?xml version="1.0" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" height="18" width="18" version="1.1"><circle cx="9" cy="9" r="8" stroke="black" stroke-width="1" fill="white"/><rect x="4" y="8" width="10" height="2" style="fill:black;stroke-width:0"/></svg>\') no-repeat left center;}\n\
                .pf:'+cgchk+' ~ *:not(:nth-child(2)) {display:none;}\n\
-               .zoombox {position:relative;width:100%;overflow-x:scroll;}\n\
+               .zoombox {position:relative;width:100%;overflow-x:scroll;-webkit-user-select:none;-moz-user-select:none;user-select:none;}\n\
                .timeline {position:relative;font-size:14px;cursor:pointer;width:100%; overflow:hidden;background:linear-gradient(#cccccc, white);}\n\
-               .thread {position:absolute;height:0%;overflow:hidden;line-height:'+devtextH+';font-size:'+devtextS+';border:1px solid;text-align:center;white-space:nowrap;background-color:rgba(204,204,204,0.5);}\n\
-               .thread.sync {background-color:'+sysvals.synccolor+';}\n\
-               .thread.bg {background-color:'+sysvals.kprobecolor+';}\n\
+               .thread {position:absolute;height:0%;overflow:hidden;z-index:7;line-height:30px;font-size:14px;border:1px solid;text-align:center;white-space:nowrap;}\n\
+               .thread.ps {border-radius:3px;background:linear-gradient(to top, #ccc, #eee);}\n\
                .thread:hover {background-color:white;border:1px solid red;'+hoverZ+'}\n\
+               .thread.sec,.thread.sec:hover {background-color:black;border:0;color:white;line-height:15px;font-size:10px;}\n\
                .hover {background-color:white;border:1px solid red;'+hoverZ+'}\n\
                .hover.sync {background-color:white;}\n\
-               .hover.bg {background-color:white;}\n\
-               .traceevent {position:absolute;font-size:10px;overflow:hidden;color:black;text-align:center;white-space:nowrap;border-radius:5px;border:1px solid black;background:linear-gradient(to bottom right,rgba(204,204,204,1),rgba(150,150,150,1));}\n\
-               .traceevent:hover {background:white;}\n\
+               .hover.bg,.hover.kth,.hover.sync,.hover.ps {background-color:white;}\n\
+               .jiffie {position:absolute;pointer-events: none;z-index:8;}\n\
+               .traceevent {position:absolute;font-size:10px;z-index:7;overflow:hidden;color:black;text-align:center;white-space:nowrap;border-radius:5px;border:1px solid black;background:linear-gradient(to bottom right,#CCC,#969696);}\n\
+               .traceevent:hover {color:white;font-weight:bold;border:1px solid white;}\n\
                .phase {position:absolute;overflow:hidden;border:0px;text-align:center;}\n\
                .phaselet {position:absolute;overflow:hidden;border:0px;text-align:center;height:100px;font-size:24px;}\n\
-               .t {z-index:2;position:absolute;pointer-events:none;top:0%;height:100%;border-right:1px solid black;}\n\
+               .t {position:absolute;line-height:'+('%d'%scaleTH)+'px;pointer-events:none;top:0;height:100%;border-right:1px solid black;z-index:6;}\n\
+               .err {position:absolute;top:0%;height:100%;border-right:3px solid red;color:red;font:bold 14px Times;line-height:18px;}\n\
                .legend {position:relative; width:100%; height:40px; text-align:center;margin-bottom:20px}\n\
                .legend .square {position:absolute;cursor:pointer;top:10px; width:0px;height:20px;border:1px solid;padding-left:20px;}\n\
                button {height:40px;width:200px;margin-bottom:20px;margin-top:20px;font-size:24px;}\n\
@@ -3327,7 +3643,8 @@ def createHTML(testruns):
                a:active {color:white;}\n\
                .version {position:relative;float:left;color:white;font-size:10px;line-height:30px;margin-left:10px;}\n\
                #devicedetail {height:100px;box-shadow:5px 5px 20px black;}\n\
-               .tblock {position:absolute;height:100%;}\n\
+               .tblock {position:absolute;height:100%;background-color:#ddd;}\n\
+               .tback {position:absolute;width:100%;background:linear-gradient(#ccc, #ddd);}\n\
                .bg {z-index:1;}\n\
        </style>\n</head>\n<body>\n'
 
@@ -3342,6 +3659,8 @@ def createHTML(testruns):
        # write the test title and general info header
        if(sysvals.stamp['time'] != ""):
                hf.write(headline_version)
+               if sysvals.logmsg:
+                       hf.write('<button id="showtest" class="logbtn">log</button>')
                if sysvals.addlogs and sysvals.dmesgfile:
                        hf.write('<button id="showdmesg" class="logbtn">dmesg</button>')
                if sysvals.addlogs and sysvals.ftracefile:
@@ -3359,6 +3678,9 @@ def createHTML(testruns):
        # draw the colored boxes for the device detail section
        for data in testruns:
                hf.write('<div id="devicedetail%d">\n' % data.testnumber)
+               pscolor = 'linear-gradient(to top left, #ccc, #eee)'
+               hf.write(html_phaselet.format('pre_suspend_process', \
+                       '0', '0', pscolor))
                for b in data.phases:
                        phase = data.dmesg[b]
                        length = phase['end']-phase['start']
@@ -3366,14 +3688,18 @@ def createHTML(testruns):
                        width = '%.3f' % ((length*100.0)/tTotal)
                        hf.write(html_phaselet.format(b, left, width, \
                                data.dmesg[b]['color']))
+               hf.write(html_phaselet.format('post_resume_process', \
+                       '0', '0', pscolor))
                if sysvals.suspendmode == 'command':
-                       hf.write(html_phaselet.format('cmdexec', '0', '0', \
-                               data.dmesg['resume_complete']['color']))
+                       hf.write(html_phaselet.format('cmdexec', '0', '0', pscolor))
                hf.write('</div>\n')
        hf.write('</div>\n')
 
        # write the ftrace data (callgraph)
-       data = testruns[-1]
+       if sysvals.cgtest >= 0 and len(testruns) > sysvals.cgtest:
+               data = testruns[sysvals.cgtest]
+       else:
+               data = testruns[-1]
        if(sysvals.usecallgraph and not sysvals.embedded):
                hf.write('<section id="callgraphs" class="callgraph">\n')
                # write out the ftrace data converted to html
@@ -3383,6 +3709,8 @@ def createHTML(testruns):
                html_func_leaf = '<article>{0} {1}</article>\n'
                num = 0
                for p in data.phases:
+                       if sysvals.cgphase and p != sysvals.cgphase:
+                               continue
                        list = data.dmesg[p]['list']
                        for devname in data.sortedDevices(p):
                                if('ftrace' not in list[devname]):
@@ -3420,11 +3748,15 @@ def createHTML(testruns):
                                hf.write(html_func_end)
                hf.write('\n\n    </section>\n')
 
+       # add the test log as a hidden div
+       if sysvals.logmsg:
+               hf.write('<div id="testlog" style="display:none;">\n'+sysvals.logmsg+'</div>\n')
        # add the dmesg log as a hidden div
        if sysvals.addlogs and sysvals.dmesgfile:
                hf.write('<div id="dmesglog" style="display:none;">\n')
                lf = open(sysvals.dmesgfile, 'r')
                for line in lf:
+                       line = line.replace('<', '&lt').replace('>', '&gt')
                        hf.write(line)
                lf.close()
                hf.write('</div>\n')
@@ -3475,8 +3807,9 @@ def addScriptCode(hf, testruns):
        script_code = \
        '<script type="text/javascript">\n'+detail+\
        '       var resolution = -1;\n'\
+       '       var dragval = [0, 0];\n'\
        '       function redrawTimescale(t0, tMax, tS) {\n'\
-       '               var rline = \'<div class="t" style="left:0;border-left:1px solid black;border-right:0;"><cR><-R</cR></div>\';\n'\
+       '               var rline = \'<div class="t" style="left:0;border-left:1px solid black;border-right:0;"><cS>&larr;R</cS></div>\';\n'\
        '               var tTotal = tMax - t0;\n'\
        '               var list = document.getElementsByClassName("tblock");\n'\
        '               for (var i = 0; i < list.length; i++) {\n'\
@@ -3501,7 +3834,7 @@ def addScriptCode(hf, testruns):
        '                                       pos = 100 - (((j)*tS*100)/mTotal) - divEdge;\n'\
        '                                       val = (j-divTotal+1)*tS;\n'\
        '                                       if(j == divTotal - 1)\n'\
-       '                                               htmlline = \'<div class="t" style="right:\'+pos+\'%"><cS>S-></cS></div>\';\n'\
+       '                                               htmlline = \'<div class="t" style="right:\'+pos+\'%"><cS>S&rarr;</cS></div>\';\n'\
        '                                       else\n'\
        '                                               htmlline = \'<div class="t" style="right:\'+pos+\'%">\'+val+\'ms</div>\';\n'\
        '                               }\n'\
@@ -3513,6 +3846,7 @@ def addScriptCode(hf, testruns):
        '       function zoomTimeline() {\n'\
        '               var dmesg = document.getElementById("dmesg");\n'\
        '               var zoombox = document.getElementById("dmesgzoombox");\n'\
+       '               var left = zoombox.scrollLeft;\n'\
        '               var val = parseFloat(dmesg.style.width);\n'\
        '               var newval = 100;\n'\
        '               var sh = window.outerWidth / 2;\n'\
@@ -3520,12 +3854,12 @@ def addScriptCode(hf, testruns):
        '                       newval = val * 1.2;\n'\
        '                       if(newval > 910034) newval = 910034;\n'\
        '                       dmesg.style.width = newval+"%";\n'\
-       '                       zoombox.scrollLeft = ((zoombox.scrollLeft + sh) * newval / val) - sh;\n'\
+       '                       zoombox.scrollLeft = ((left + sh) * newval / val) - sh;\n'\
        '               } else if (this.id == "zoomout") {\n'\
        '                       newval = val / 1.2;\n'\
        '                       if(newval < 100) newval = 100;\n'\
        '                       dmesg.style.width = newval+"%";\n'\
-       '                       zoombox.scrollLeft = ((zoombox.scrollLeft + sh) * newval / val) - sh;\n'\
+       '                       zoombox.scrollLeft = ((left + sh) * newval / val) - sh;\n'\
        '               } else {\n'\
        '                       zoombox.scrollLeft = 0;\n'\
        '                       dmesg.style.width = "100%";\n'\
@@ -3542,8 +3876,12 @@ def addScriptCode(hf, testruns):
        '               resolution = tS[i];\n'\
        '               redrawTimescale(t0, tMax, tS[i]);\n'\
        '       }\n'\
+       '       function deviceName(title) {\n'\
+       '               var name = title.slice(0, title.indexOf(" ("));\n'\
+       '               return name;\n'\
+       '       }\n'\
        '       function deviceHover() {\n'\
-       '               var name = this.title.slice(0, this.title.indexOf(" ("));\n'\
+       '               var name = deviceName(this.title);\n'\
        '               var dmesg = document.getElementById("dmesg");\n'\
        '               var dev = dmesg.getElementsByClassName("thread");\n'\
        '               var cpu = -1;\n'\
@@ -3552,7 +3890,7 @@ def addScriptCode(hf, testruns):
        '               else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
        '                       cpu = parseInt(name.slice(8));\n'\
        '               for (var i = 0; i < dev.length; i++) {\n'\
-       '                       dname = dev[i].title.slice(0, dev[i].title.indexOf(" ("));\n'\
+       '                       dname = deviceName(dev[i].title);\n'\
        '                       var cname = dev[i].className.slice(dev[i].className.indexOf("thread"));\n'\
        '                       if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
        '                               (name == dname))\n'\
@@ -3578,7 +3916,7 @@ def addScriptCode(hf, testruns):
        '                       total[2] = (total[2]+total[4])/2;\n'\
        '               }\n'\
        '               var devtitle = document.getElementById("devicedetailtitle");\n'\
-       '               var name = title.slice(0, title.indexOf(" ("));\n'\
+       '               var name = deviceName(title);\n'\
        '               if(cpu >= 0) name = "CPU"+cpu;\n'\
        '               var driver = "";\n'\
        '               var tS = "<t2>(</t2>";\n'\
@@ -3600,7 +3938,7 @@ def addScriptCode(hf, testruns):
        '       function deviceDetail() {\n'\
        '               var devinfo = document.getElementById("devicedetail");\n'\
        '               devinfo.style.display = "block";\n'\
-       '               var name = this.title.slice(0, this.title.indexOf(" ("));\n'\
+       '               var name = deviceName(this.title);\n'\
        '               var cpu = -1;\n'\
        '               if(name.match("CPU_ON\[[0-9]*\]"))\n'\
        '                       cpu = parseInt(name.slice(7));\n'\
@@ -3615,7 +3953,7 @@ def addScriptCode(hf, testruns):
        '               var pd = pdata[0];\n'\
        '               var total = [0.0, 0.0, 0.0];\n'\
        '               for (var i = 0; i < dev.length; i++) {\n'\
-       '                       dname = dev[i].title.slice(0, dev[i].title.indexOf(" ("));\n'\
+       '                       dname = deviceName(dev[i].title);\n'\
        '                       if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
        '                               (name == dname))\n'\
        '                       {\n'\
@@ -3656,7 +3994,7 @@ def addScriptCode(hf, testruns):
        '                                       phases[i].title = phases[i].id+" "+pd[phases[i].id]+" ms";\n'\
        '                                       left += w;\n'\
        '                                       var time = "<t4 style=\\"font-size:"+fs+"px\\">"+pd[phases[i].id]+" ms<br></t4>";\n'\
-       '                                       var pname = "<t3 style=\\"font-size:"+fs2+"px\\">"+phases[i].id.replace("_", " ")+"</t3>";\n'\
+       '                                       var pname = "<t3 style=\\"font-size:"+fs2+"px\\">"+phases[i].id.replace(new RegExp("_", "g"), " ")+"</t3>";\n'\
        '                                       phases[i].innerHTML = time+pname;\n'\
        '                               } else {\n'\
        '                                       phases[i].style.width = "0%";\n'\
@@ -3677,12 +4015,7 @@ def addScriptCode(hf, testruns):
        '               }\n'\
        '       }\n'\
        '       function devListWindow(e) {\n'\
-       '               var sx = e.clientX;\n'\
-       '               if(sx > window.innerWidth - 440)\n'\
-       '                       sx = window.innerWidth - 440;\n'\
-       '               var cfg="top="+e.screenY+", left="+sx+", width=440, height=720, scrollbars=yes";\n'\
-       '               var win = window.open("", "_blank", cfg);\n'\
-       '               if(window.chrome) win.moveBy(sx, 0);\n'\
+       '               var win = window.open();\n'\
        '               var html = "<title>"+e.target.innerHTML+"</title>"+\n'\
        '                       "<style type=\\"text/css\\">"+\n'\
        '                       "   ul {list-style-type:circle;padding-left:10px;margin-left:10px;}"+\n'\
@@ -3692,6 +4025,12 @@ def addScriptCode(hf, testruns):
        '                       dt = devtable[1];\n'\
        '               win.document.write(html+dt);\n'\
        '       }\n'\
+       '       function errWindow() {\n'\
+       '               var text = this.id;\n'\
+       '               var win = window.open();\n'\
+       '               win.document.write("<pre>"+text+"</pre>");\n'\
+       '               win.document.close();\n'\
+       '       }\n'\
        '       function logWindow(e) {\n'\
        '               var name = e.target.id.slice(4);\n'\
        '               var win = window.open();\n'\
@@ -3702,16 +4041,46 @@ def addScriptCode(hf, testruns):
        '       }\n'\
        '       function onClickPhase(e) {\n'\
        '       }\n'\
+       '       function onMouseDown(e) {\n'\
+       '               dragval[0] = e.clientX;\n'\
+       '               dragval[1] = document.getElementById("dmesgzoombox").scrollLeft;\n'\
+       '               document.onmousemove = onMouseMove;\n'\
+       '       }\n'\
+       '       function onMouseMove(e) {\n'\
+       '               var zoombox = document.getElementById("dmesgzoombox");\n'\
+       '               zoombox.scrollLeft = dragval[1] + dragval[0] - e.clientX;\n'\
+       '       }\n'\
+       '       function onMouseUp(e) {\n'\
+       '               document.onmousemove = null;\n'\
+       '       }\n'\
+       '       function onKeyPress(e) {\n'\
+       '               var c = e.charCode;\n'\
+       '               if(c != 42 && c != 43 && c != 45) return;\n'\
+       '               var click = document.createEvent("Events");\n'\
+       '               click.initEvent("click", true, false);\n'\
+       '               if(c == 43)  \n'\
+       '                       document.getElementById("zoomin").dispatchEvent(click);\n'\
+       '               else if(c == 45)\n'\
+       '                       document.getElementById("zoomout").dispatchEvent(click);\n'\
+       '               else if(c == 42)\n'\
+       '                       document.getElementById("zoomdef").dispatchEvent(click);\n'\
+       '       }\n'\
        '       window.addEventListener("resize", function () {zoomTimeline();});\n'\
        '       window.addEventListener("load", function () {\n'\
        '               var dmesg = document.getElementById("dmesg");\n'\
        '               dmesg.style.width = "100%"\n'\
+       '               dmesg.onmousedown = onMouseDown;\n'\
+       '               document.onmouseup = onMouseUp;\n'\
+       '               document.onkeypress = onKeyPress;\n'\
        '               document.getElementById("zoomin").onclick = zoomTimeline;\n'\
        '               document.getElementById("zoomout").onclick = zoomTimeline;\n'\
        '               document.getElementById("zoomdef").onclick = zoomTimeline;\n'\
        '               var list = document.getElementsByClassName("square");\n'\
        '               for (var i = 0; i < list.length; i++)\n'\
        '                       list[i].onclick = onClickPhase;\n'\
+       '               var list = document.getElementsByClassName("err");\n'\
+       '               for (var i = 0; i < list.length; i++)\n'\
+       '                       list[i].onclick = errWindow;\n'\
        '               var list = document.getElementsByClassName("logbtn");\n'\
        '               for (var i = 0; i < list.length; i++)\n'\
        '                       list[i].onclick = logWindow;\n'\
@@ -3734,9 +4103,7 @@ def addScriptCode(hf, testruns):
 #       Execute system suspend through the sysfs interface, then copy the output
 #       dmesg and ftrace files to the test output directory.
 def executeSuspend():
-       global sysvals
-
-       t0 = time.time()*1000
+       pm = ProcessMonitor()
        tp = sysvals.tpath
        fwdata = []
        # mark the start point in the kernel ring buffer just as we start
@@ -3745,30 +4112,39 @@ def executeSuspend():
        if(sysvals.usecallgraph or sysvals.usetraceevents):
                print('START TRACING')
                sysvals.fsetVal('1', 'tracing_on')
+               if sysvals.useprocmon:
+                       pm.start()
        # execute however many s/r runs requested
        for count in range(1,sysvals.execcount+1):
-               # if this is test2 and there's a delay, start here
+               # x2delay in between test runs
                if(count > 1 and sysvals.x2delay > 0):
-                       tN = time.time()*1000
-                       while (tN - t0) < sysvals.x2delay:
-                               tN = time.time()*1000
-                               time.sleep(0.001)
-               # initiate suspend
-               if(sysvals.usecallgraph or sysvals.usetraceevents):
-                       sysvals.fsetVal('SUSPEND START', 'trace_marker')
-               if sysvals.suspendmode == 'command':
+                       sysvals.fsetVal('WAIT %d' % sysvals.x2delay, 'trace_marker')
+                       time.sleep(sysvals.x2delay/1000.0)
+                       sysvals.fsetVal('WAIT END', 'trace_marker')
+               # start message
+               if sysvals.testcommand != '':
                        print('COMMAND START')
-                       if(sysvals.rtcwake):
-                               print('will issue an rtcwake in %d seconds' % sysvals.rtcwaketime)
-                               sysvals.rtcWakeAlarmOn()
-                       os.system(sysvals.testcommand)
                else:
                        if(sysvals.rtcwake):
                                print('SUSPEND START')
-                               print('will autoresume in %d seconds' % sysvals.rtcwaketime)
-                               sysvals.rtcWakeAlarmOn()
                        else:
                                print('SUSPEND START (press a key to resume)')
+               # set rtcwake
+               if(sysvals.rtcwake):
+                       print('will issue an rtcwake in %d seconds' % sysvals.rtcwaketime)
+                       sysvals.rtcWakeAlarmOn()
+               # start of suspend trace marker
+               if(sysvals.usecallgraph or sysvals.usetraceevents):
+                       sysvals.fsetVal('SUSPEND START', 'trace_marker')
+               # predelay delay
+               if(count == 1 and sysvals.predelay > 0):
+                       sysvals.fsetVal('WAIT %d' % sysvals.predelay, 'trace_marker')
+                       time.sleep(sysvals.predelay/1000.0)
+                       sysvals.fsetVal('WAIT END', 'trace_marker')
+               # initiate suspend or command
+               if sysvals.testcommand != '':
+                       call(sysvals.testcommand+' 2>&1', shell=True);
+               else:
                        pf = open(sysvals.powerfile, 'w')
                        pf.write(sysvals.suspendmode)
                        # execution will pause here
@@ -3776,26 +4152,27 @@ def executeSuspend():
                                pf.close()
                        except:
                                pass
-               t0 = time.time()*1000
                if(sysvals.rtcwake):
                        sysvals.rtcWakeAlarmOff()
+               # postdelay delay
+               if(count == sysvals.execcount and sysvals.postdelay > 0):
+                       sysvals.fsetVal('WAIT %d' % sysvals.postdelay, 'trace_marker')
+                       time.sleep(sysvals.postdelay/1000.0)
+                       sysvals.fsetVal('WAIT END', 'trace_marker')
                # return from suspend
                print('RESUME COMPLETE')
                if(sysvals.usecallgraph or sysvals.usetraceevents):
                        sysvals.fsetVal('RESUME COMPLETE', 'trace_marker')
-               if(sysvals.suspendmode == 'mem'):
+               if(sysvals.suspendmode == 'mem' or sysvals.suspendmode == 'command'):
                        fwdata.append(getFPDT(False))
-       # look for post resume events after the last test run
-       t = sysvals.postresumetime
-       if(t > 0):
-               print('Waiting %d seconds for POST-RESUME trace events...' % t)
-               time.sleep(t)
        # stop ftrace
        if(sysvals.usecallgraph or sysvals.usetraceevents):
+               if sysvals.useprocmon:
+                       pm.stop()
                sysvals.fsetVal('0', 'tracing_on')
                print('CAPTURING TRACE')
                writeDatafileHeader(sysvals.ftracefile, fwdata)
-               os.system('cat '+tp+'trace >> '+sysvals.ftracefile)
+               call('cat '+tp+'trace >> '+sysvals.ftracefile, shell=True)
                sysvals.fsetVal('', 'trace')
                devProps()
        # grab a copy of the dmesg output
@@ -3804,17 +4181,12 @@ def executeSuspend():
        sysvals.getdmesg()
 
 def writeDatafileHeader(filename, fwdata):
-       global sysvals
-
-       prt = sysvals.postresumetime
        fp = open(filename, 'a')
        fp.write(sysvals.teststamp+'\n')
-       if(sysvals.suspendmode == 'mem'):
+       if(sysvals.suspendmode == 'mem' or sysvals.suspendmode == 'command'):
                for fw in fwdata:
                        if(fw):
                                fp.write('# fwsuspend %u fwresume %u\n' % (fw[0], fw[1]))
-       if(prt > 0):
-               fp.write('# post resume time %u\n' % prt)
        fp.close()
 
 # Function: setUSBDevicesAuto
@@ -3824,18 +4196,16 @@ def writeDatafileHeader(filename, fwdata):
 #       to always-on since the kernel cant determine if the device can
 #       properly autosuspend
 def setUSBDevicesAuto():
-       global sysvals
-
        rootCheck(True)
        for dirname, dirnames, filenames in os.walk('/sys/devices'):
                if(re.match('.*/usb[0-9]*.*', dirname) and
                        'idVendor' in filenames and 'idProduct' in filenames):
-                       os.system('echo auto > %s/power/control' % dirname)
+                       call('echo auto > %s/power/control' % dirname, shell=True)
                        name = dirname.split('/')[-1]
-                       desc = os.popen('cat %s/product 2>/dev/null' % \
-                               dirname).read().replace('\n', '')
-                       ctrl = os.popen('cat %s/power/control 2>/dev/null' % \
-                               dirname).read().replace('\n', '')
+                       desc = Popen(['cat', '%s/product' % dirname],
+                               stderr=PIPE, stdout=PIPE).stdout.read().replace('\n', '')
+                       ctrl = Popen(['cat', '%s/power/control' % dirname],
+                               stderr=PIPE, stdout=PIPE).stdout.read().replace('\n', '')
                        print('control is %s for %6s: %s' % (ctrl, name, desc))
 
 # Function: yesno
@@ -3872,8 +4242,6 @@ def ms2nice(val):
 #       Detect all the USB hosts and devices currently connected and add
 #       a list of USB device names to sysvals for better timeline readability
 def detectUSB():
-       global sysvals
-
        field = {'idVendor':'', 'idProduct':'', 'product':'', 'speed':''}
        power = {'async':'', 'autosuspend':'', 'autosuspend_delay_ms':'',
                         'control':'', 'persist':'', 'runtime_enabled':'',
@@ -3899,12 +4267,12 @@ def detectUSB():
                if(re.match('.*/usb[0-9]*.*', dirname) and
                        'idVendor' in filenames and 'idProduct' in filenames):
                        for i in field:
-                               field[i] = os.popen('cat %s/%s 2>/dev/null' % \
-                                       (dirname, i)).read().replace('\n', '')
+                               field[i] = Popen(['cat', '%s/%s' % (dirname, i)],
+                                       stderr=PIPE, stdout=PIPE).stdout.read().replace('\n', '')
                        name = dirname.split('/')[-1]
                        for i in power:
-                               power[i] = os.popen('cat %s/power/%s 2>/dev/null' % \
-                                       (dirname, i)).read().replace('\n', '')
+                               power[i] = Popen(['cat', '%s/power/%s' % (dirname, i)],
+                                       stderr=PIPE, stdout=PIPE).stdout.read().replace('\n', '')
                        if(re.match('usb[0-9]*', name)):
                                first = '%-8s' % name
                        else:
@@ -3928,7 +4296,6 @@ def detectUSB():
 # Description:
 #       Retrieve a list of properties for all devices in the trace log
 def devProps(data=0):
-       global sysvals
        props = dict()
 
        if data:
@@ -3953,7 +4320,7 @@ def devProps(data=0):
                return
 
        if(os.path.exists(sysvals.ftracefile) == False):
-               doError('%s does not exist' % sysvals.ftracefile, False)
+               doError('%s does not exist' % sysvals.ftracefile)
 
        # first get the list of devices we need properties for
        msghead = 'Additional data added by AnalyzeSuspend'
@@ -3976,7 +4343,7 @@ def devProps(data=0):
                m = re.match('.*: (?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*', m.group('msg'));
                if(not m):
                        continue
-               drv, dev, par = m.group('drv'), m.group('d'), m.group('p')
+               dev = m.group('d')
                if dev not in props:
                        props[dev] = DevProps()
        tf.close()
@@ -4052,7 +4419,6 @@ def devProps(data=0):
 # Output:
 #       A string list of the available modes
 def getModes():
-       global sysvals
        modes = ''
        if(os.path.exists(sysvals.powerfile)):
                fp = open(sysvals.powerfile, 'r')
@@ -4066,8 +4432,6 @@ def getModes():
 # Arguments:
 #       output: True to output the info to stdout, False otherwise
 def getFPDT(output):
-       global sysvals
-
        rectype = {}
        rectype[0] = 'Firmware Basic Boot Performance Record'
        rectype[1] = 'S3 Performance Table Record'
@@ -4078,19 +4442,19 @@ def getFPDT(output):
        rootCheck(True)
        if(not os.path.exists(sysvals.fpdtpath)):
                if(output):
-                       doError('file does not exist: %s' % sysvals.fpdtpath, False)
+                       doError('file does not exist: %s' % sysvals.fpdtpath)
                return False
        if(not os.access(sysvals.fpdtpath, os.R_OK)):
                if(output):
-                       doError('file is not readable: %s' % sysvals.fpdtpath, False)
+                       doError('file is not readable: %s' % sysvals.fpdtpath)
                return False
        if(not os.path.exists(sysvals.mempath)):
                if(output):
-                       doError('file does not exist: %s' % sysvals.mempath, False)
+                       doError('file does not exist: %s' % sysvals.mempath)
                return False
        if(not os.access(sysvals.mempath, os.R_OK)):
                if(output):
-                       doError('file is not readable: %s' % sysvals.mempath, False)
+                       doError('file is not readable: %s' % sysvals.mempath)
                return False
 
        fp = open(sysvals.fpdtpath, 'rb')
@@ -4100,7 +4464,7 @@ def getFPDT(output):
        if(len(buf) < 36):
                if(output):
                        doError('Invalid FPDT table data, should '+\
-                               'be at least 36 bytes', False)
+                               'be at least 36 bytes')
                return False
 
        table = struct.unpack('4sIBB6s8sI4sI', buf[0:36])
@@ -4199,7 +4563,6 @@ def getFPDT(output):
 # Output:
 #       True if the test will work, False if not
 def statusCheck(probecheck=False):
-       global sysvals
        status = True
 
        print('Checking this system (%s)...' % platform.node())
@@ -4282,37 +4645,14 @@ def statusCheck(probecheck=False):
        if not probecheck:
                return status
 
-       if (sysvals.usecallgraph and len(sysvals.debugfuncs) > 0) or len(sysvals.kprobes) > 0:
-               sysvals.initFtrace(True)
-
-       # verify callgraph debugfuncs
-       if sysvals.usecallgraph and len(sysvals.debugfuncs) > 0:
-               print('    verifying these ftrace callgraph functions work:')
-               sysvals.setFtraceFilterFunctions(sysvals.debugfuncs)
-               fp = open(sysvals.tpath+'set_graph_function', 'r')
-               flist = fp.read().split('\n')
-               fp.close()
-               for func in sysvals.debugfuncs:
-                       res = sysvals.colorText('NO')
-                       if func in flist:
-                               res = 'YES'
-                       else:
-                               for i in flist:
-                                       if ' [' in i and func == i.split(' ')[0]:
-                                               res = 'YES'
-                                               break
-                       print('         %s: %s' % (func, res))
-
        # verify kprobes
-       if len(sysvals.kprobes) > 0:
-               print('    verifying these kprobes work:')
-               for name in sorted(sysvals.kprobes):
-                       if name in sysvals.tracefuncs:
-                               continue
-                       res = sysvals.colorText('NO')
-                       if sysvals.testKprobe(sysvals.kprobes[name]):
-                               res = 'YES'
-                       print('         %s: %s' % (name, res))
+       if sysvals.usekprobes:
+               for name in sysvals.tracefuncs:
+                       sysvals.defaultKprobe(name, sysvals.tracefuncs[name])
+               if sysvals.usedevsrc:
+                       for name in sysvals.dev_tracefuncs:
+                               sysvals.defaultKprobe(name, sysvals.dev_tracefuncs[name])
+               sysvals.addKprobes(True)
 
        return status
 
@@ -4322,33 +4662,20 @@ def statusCheck(probecheck=False):
 # Arguments:
 #       msg: the error message to print
 #       help: True if printHelp should be called after, False otherwise
-def doError(msg, help):
+def doError(msg, help=False):
        if(help == True):
                printHelp()
        print('ERROR: %s\n') % msg
        sys.exit()
 
-# Function: doWarning
-# Description:
-#       generic warning function for non-catastrophic anomalies
-# Arguments:
-#       msg: the warning message to print
-#       file: If not empty, a filename to request be sent to the owner for debug
-def doWarning(msg, file=''):
-       print('/* %s */') % msg
-       if(file):
-               print('/* For a fix, please send this'+\
-                       ' %s file to <todd.e.brandt@intel.com> */' % file)
-
 # Function: rootCheck
 # Description:
 #       quick check to see if we have root access
 def rootCheck(fatal):
-       global sysvals
        if(os.access(sysvals.powerfile, os.W_OK)):
                return True
        if fatal:
-               doError('This command must be run as root', False)
+               doError('This command must be run as root')
        return False
 
 # Function: getArgInt
@@ -4389,71 +4716,61 @@ def getArgFloat(name, args, min, max, main=True):
                doError(name+': value should be between %f and %f' % (min, max), True)
        return val
 
-# Function: rerunTest
-# Description:
-#       generate an output from an existing set of ftrace/dmesg logs
-def rerunTest():
-       global sysvals
-
-       if(sysvals.ftracefile != ''):
-               doesTraceLogHaveTraceEvents()
-       if(sysvals.dmesgfile == '' and not sysvals.usetraceeventsonly):
-               doError('recreating this html output '+\
-                       'requires a dmesg file', False)
-       sysvals.setOutputFile()
-       vprint('Output file: %s' % sysvals.htmlfile)
+def processData():
        print('PROCESSING DATA')
        if(sysvals.usetraceeventsonly):
                testruns = parseTraceLog()
+               if sysvals.dmesgfile:
+                       dmesgtext = loadKernelLog(True)
+                       for data in testruns:
+                               data.extractErrorInfo(dmesgtext)
        else:
                testruns = loadKernelLog()
                for data in testruns:
                        parseKernelLog(data)
-               if(sysvals.ftracefile != ''):
+               if(sysvals.ftracefile and (sysvals.usecallgraph or sysvals.usetraceevents)):
                        appendIncompleteTraceLog(testruns)
        createHTML(testruns)
 
+# Function: rerunTest
+# Description:
+#       generate an output from an existing set of ftrace/dmesg logs
+def rerunTest():
+       if sysvals.ftracefile:
+               doesTraceLogHaveTraceEvents()
+       if not sysvals.dmesgfile and not sysvals.usetraceeventsonly:
+               doError('recreating this html output requires a dmesg file')
+       sysvals.setOutputFile()
+       vprint('Output file: %s' % sysvals.htmlfile)
+       if(os.path.exists(sysvals.htmlfile) and not os.access(sysvals.htmlfile, os.W_OK)):
+               doError('missing permission to write to %s' % sysvals.htmlfile)
+       processData()
+
 # Function: runTest
 # Description:
 #       execute a suspend/resume, gather the logs, and generate the output
 def runTest(subdir, testpath=''):
-       global sysvals
-
        # prepare for the test
        sysvals.initFtrace()
        sysvals.initTestOutput(subdir, testpath)
-
-       vprint('Output files:\n    %s' % sysvals.dmesgfile)
-       if(sysvals.usecallgraph or
-               sysvals.usetraceevents or
-               sysvals.usetraceeventsonly):
-               vprint('    %s' % sysvals.ftracefile)
-       vprint('    %s' % sysvals.htmlfile)
+       vprint('Output files:\n\t%s\n\t%s\n\t%s' % \
+               (sysvals.dmesgfile, sysvals.ftracefile, sysvals.htmlfile))
 
        # execute the test
        executeSuspend()
        sysvals.cleanupFtrace()
+       processData()
 
-       # analyze the data and create the html output
-       print('PROCESSING DATA')
-       if(sysvals.usetraceeventsonly):
-               # data for kernels 3.15 or newer is entirely in ftrace
-               testruns = parseTraceLog()
-       else:
-               # data for kernels older than 3.15 is primarily in dmesg
-               testruns = loadKernelLog()
-               for data in testruns:
-                       parseKernelLog(data)
-               if(sysvals.usecallgraph or sysvals.usetraceevents):
-                       appendIncompleteTraceLog(testruns)
-       createHTML(testruns)
+       # if running as root, change output dir owner to sudo_user
+       if os.path.isdir(sysvals.testdir) and os.getuid() == 0 and \
+               'SUDO_USER' in os.environ:
+               cmd = 'chown -R {0}:{0} {1} > /dev/null 2>&1'
+               call(cmd.format(os.environ['SUDO_USER'], sysvals.testdir), shell=True)
 
 # Function: runSummary
 # Description:
 #       create a summary of tests in a sub-directory
 def runSummary(subdir, output):
-       global sysvals
-
        # get a list of ftrace output files
        files = []
        for dirname, dirnames, filenames in os.walk(subdir):
@@ -4509,12 +4826,12 @@ def checkArgBool(value):
 # Description:
 #       Configure the script via the info in a config file
 def configFromFile(file):
-       global sysvals
        Config = ConfigParser.ConfigParser()
 
-       ignorekprobes = False
        Config.read(file)
        sections = Config.sections()
+       overridekprobes = False
+       overridedevkprobes = False
        if 'Settings' in sections:
                for opt in Config.options('Settings'):
                        value = Config.get('Settings', opt).lower()
@@ -4524,19 +4841,19 @@ def configFromFile(file):
                                sysvals.addlogs = checkArgBool(value)
                        elif(opt.lower() == 'dev'):
                                sysvals.usedevsrc = checkArgBool(value)
-                       elif(opt.lower() == 'ignorekprobes'):
-                               ignorekprobes = checkArgBool(value)
+                       elif(opt.lower() == 'proc'):
+                               sysvals.useprocmon = checkArgBool(value)
                        elif(opt.lower() == 'x2'):
                                if checkArgBool(value):
                                        sysvals.execcount = 2
                        elif(opt.lower() == 'callgraph'):
                                sysvals.usecallgraph = checkArgBool(value)
-                       elif(opt.lower() == 'callgraphfunc'):
-                               sysvals.debugfuncs = []
-                               if value:
-                                       value = value.split(',')
-                               for i in value:
-                                       sysvals.debugfuncs.append(i.strip())
+                       elif(opt.lower() == 'override-timeline-functions'):
+                               overridekprobes = checkArgBool(value)
+                       elif(opt.lower() == 'override-dev-timeline-functions'):
+                               overridedevkprobes = checkArgBool(value)
+                       elif(opt.lower() == 'devicefilter'):
+                               sysvals.setDeviceFilter(value)
                        elif(opt.lower() == 'expandcg'):
                                sysvals.cgexp = checkArgBool(value)
                        elif(opt.lower() == 'srgap'):
@@ -4548,8 +4865,10 @@ def configFromFile(file):
                                sysvals.testcommand = value
                        elif(opt.lower() == 'x2delay'):
                                sysvals.x2delay = getArgInt('-x2delay', value, 0, 60000, False)
-                       elif(opt.lower() == 'postres'):
-                               sysvals.postresumetime = getArgInt('-postres', value, 0, 3600, False)
+                       elif(opt.lower() == 'predelay'):
+                               sysvals.predelay = getArgInt('-predelay', value, 0, 60000, False)
+                       elif(opt.lower() == 'postdelay'):
+                               sysvals.postdelay = getArgInt('-postdelay', value, 0, 60000, False)
                        elif(opt.lower() == 'rtcwake'):
                                sysvals.rtcwake = True
                                sysvals.rtcwaketime = getArgInt('-rtcwake', value, 0, 3600, False)
@@ -4557,53 +4876,50 @@ def configFromFile(file):
                                sysvals.setPrecision(getArgInt('-timeprec', value, 0, 6, False))
                        elif(opt.lower() == 'mindev'):
                                sysvals.mindevlen = getArgFloat('-mindev', value, 0.0, 10000.0, False)
+                       elif(opt.lower() == 'callloop-maxgap'):
+                               sysvals.callloopmaxgap = getArgFloat('-callloop-maxgap', value, 0.0, 1.0, False)
+                       elif(opt.lower() == 'callloop-maxlen'):
+                               sysvals.callloopmaxgap = getArgFloat('-callloop-maxlen', value, 0.0, 1.0, False)
                        elif(opt.lower() == 'mincg'):
                                sysvals.mincglen = getArgFloat('-mincg', value, 0.0, 10000.0, False)
-                       elif(opt.lower() == 'kprobecolor'):
-                               try:
-                                       val = int(value, 16)
-                                       sysvals.kprobecolor = '#'+value
-                               except:
-                                       sysvals.kprobecolor = value
-                       elif(opt.lower() == 'synccolor'):
-                               try:
-                                       val = int(value, 16)
-                                       sysvals.synccolor = '#'+value
-                               except:
-                                       sysvals.synccolor = value
                        elif(opt.lower() == 'output-dir'):
-                               args = dict()
-                               n = datetime.now()
-                               args['date'] = n.strftime('%y%m%d')
-                               args['time'] = n.strftime('%H%M%S')
-                               args['hostname'] = sysvals.hostname
-                               sysvals.outdir = value.format(**args)
+                               sysvals.setOutputFolder(value)
 
        if sysvals.suspendmode == 'command' and not sysvals.testcommand:
-               doError('No command supplied for mode "command"', False)
+               doError('No command supplied for mode "command"')
+
+       # compatibility errors
        if sysvals.usedevsrc and sysvals.usecallgraph:
-               doError('dev and callgraph cannot both be true', False)
-       if sysvals.usecallgraph and sysvals.execcount > 1:
-               doError('-x2 is not compatible with -f', False)
+               doError('-dev is not compatible with -f')
+       if sysvals.usecallgraph and sysvals.useprocmon:
+               doError('-proc is not compatible with -f')
 
-       if ignorekprobes:
-               return
+       if overridekprobes:
+               sysvals.tracefuncs = dict()
+       if overridedevkprobes:
+               sysvals.dev_tracefuncs = dict()
 
        kprobes = dict()
-       archkprobe = 'Kprobe_'+platform.machine()
-       if archkprobe in sections:
-               for name in Config.options(archkprobe):
-                       kprobes[name] = Config.get(archkprobe, name)
-       if 'Kprobe' in sections:
-               for name in Config.options('Kprobe'):
-                       kprobes[name] = Config.get('Kprobe', name)
+       kprobesec = 'dev_timeline_functions_'+platform.machine()
+       if kprobesec in sections:
+               for name in Config.options(kprobesec):
+                       text = Config.get(kprobesec, name)
+                       kprobes[name] = (text, True)
+       kprobesec = 'timeline_functions_'+platform.machine()
+       if kprobesec in sections:
+               for name in Config.options(kprobesec):
+                       if name in kprobes:
+                               doError('Duplicate timeline function found "%s"' % (name))
+                       text = Config.get(kprobesec, name)
+                       kprobes[name] = (text, False)
 
        for name in kprobes:
                function = name
                format = name
                color = ''
                args = dict()
-               data = kprobes[name].split()
+               text, dev = kprobes[name]
+               data = text.split()
                i = 0
                for val in data:
                        # bracketted strings are special formatting, read them separately
@@ -4626,28 +4942,30 @@ def configFromFile(file):
                                args[d[0]] = d[1]
                        i += 1
                if not function or not format:
-                       doError('Invalid kprobe: %s' % name, False)
+                       doError('Invalid kprobe: %s' % name)
                for arg in re.findall('{(?P<n>[a-z,A-Z,0-9]*)}', format):
                        if arg not in args:
-                               doError('Kprobe "%s" is missing argument "%s"' % (name, arg), False)
-               if name in sysvals.kprobes:
-                       doError('Duplicate kprobe found "%s"' % (name), False)
-               vprint('Adding KPROBE: %s %s %s %s' % (name, function, format, args))
-               sysvals.kprobes[name] = {
+                               doError('Kprobe "%s" is missing argument "%s"' % (name, arg))
+               if (dev and name in sysvals.dev_tracefuncs) or (not dev and name in sysvals.tracefuncs):
+                       doError('Duplicate timeline function found "%s"' % (name))
+
+               kp = {
                        'name': name,
                        'func': function,
                        'format': format,
-                       'args': args,
-                       'mask': re.sub('{(?P<n>[a-z,A-Z,0-9]*)}', '.*', format)
+                       sysvals.archargs: args
                }
                if color:
-                       sysvals.kprobes[name]['color'] = color
+                       kp['color'] = color
+               if dev:
+                       sysvals.dev_tracefuncs[name] = kp
+               else:
+                       sysvals.tracefuncs[name] = kp
 
 # Function: printHelp
 # Description:
 #       print out the help text
 def printHelp():
-       global sysvals
        modes = getModes()
 
        print('')
@@ -4670,44 +4988,47 @@ def printHelp():
        print('')
        print('Options:')
        print('  [general]')
-       print('    -h          Print this help text')
-       print('    -v          Print the current tool version')
-       print('    -config file Pull arguments and config options from a file')
-       print('    -verbose    Print extra information during execution and analysis')
-       print('    -status     Test to see if the system is enabled to run this tool')
-       print('    -modes      List available suspend modes')
-       print('    -m mode     Mode to initiate for suspend %s (default: %s)') % (modes, sysvals.suspendmode)
-       print('    -o subdir   Override the output subdirectory')
+       print('   -h           Print this help text')
+       print('   -v           Print the current tool version')
+       print('   -config fn   Pull arguments and config options from file fn')
+       print('   -verbose     Print extra information during execution and analysis')
+       print('   -status      Test to see if the system is enabled to run this tool')
+       print('   -modes       List available suspend modes')
+       print('   -m mode      Mode to initiate for suspend %s (default: %s)') % (modes, sysvals.suspendmode)
+       print('   -o subdir    Override the output subdirectory')
+       print('   -rtcwake t   Use rtcwake to autoresume after <t> seconds (default: disabled)')
+       print('   -addlogs     Add the dmesg and ftrace logs to the html output')
+       print('   -srgap       Add a visible gap in the timeline between sus/res (default: disabled)')
        print('  [advanced]')
-       print('    -rtcwake t  Use rtcwake to autoresume after <t> seconds (default: disabled)')
-       print('    -addlogs    Add the dmesg and ftrace logs to the html output')
-       print('    -multi n d  Execute <n> consecutive tests at <d> seconds intervals. The outputs will')
+       print('   -cmd {s}     Run the timeline over a custom command, e.g. "sync -d"')
+       print('   -proc        Add usermode process info into the timeline (default: disabled)')
+       print('   -dev         Add kernel function calls and threads to the timeline (default: disabled)')
+       print('   -x2          Run two suspend/resumes back to back (default: disabled)')
+       print('   -x2delay t   Include t ms delay between multiple test runs (default: 0 ms)')
+       print('   -predelay t  Include t ms delay before 1st suspend (default: 0 ms)')
+       print('   -postdelay t Include t ms delay after last resume (default: 0 ms)')
+       print('   -mindev ms   Discard all device blocks shorter than ms milliseconds (e.g. 0.001 for us)')
+       print('   -multi n d   Execute <n> consecutive tests at <d> seconds intervals. The outputs will')
        print('                be created in a new subdirectory with a summary page.')
-       print('    -srgap      Add a visible gap in the timeline between sus/res (default: disabled)')
-       print('    -cmd {s}    Instead of suspend/resume, run a command, e.g. "sync -d"')
-       print('    -mindev ms  Discard all device blocks shorter than ms milliseconds (e.g. 0.001 for us)')
-       print('    -mincg  ms  Discard all callgraphs shorter than ms milliseconds (e.g. 0.001 for us)')
-       print('    -timeprec N Number of significant digits in timestamps (0:S, [3:ms], 6:us)')
        print('  [debug]')
-       print('    -f          Use ftrace to create device callgraphs (default: disabled)')
-       print('    -expandcg   pre-expand the callgraph data in the html output (default: disabled)')
-       print('    -flist      Print the list of functions currently being captured in ftrace')
-       print('    -flistall   Print all functions capable of being captured in ftrace')
-       print('    -fadd file  Add functions to be graphed in the timeline from a list in a text file')
-       print('    -filter "d1 d2 ..." Filter out all but this list of device names')
-       print('    -dev        Display common low level functions in the timeline')
-       print('  [post-resume task analysis]')
-       print('    -x2         Run two suspend/resumes back to back (default: disabled)')
-       print('    -x2delay t  Minimum millisecond delay <t> between the two test runs (default: 0 ms)')
-       print('    -postres t  Time after resume completion to wait for post-resume events (default: 0 S)')
+       print('   -f           Use ftrace to create device callgraphs (default: disabled)')
+       print('   -expandcg    pre-expand the callgraph data in the html output (default: disabled)')
+       print('   -flist       Print the list of functions currently being captured in ftrace')
+       print('   -flistall    Print all functions capable of being captured in ftrace')
+       print('   -fadd file   Add functions to be graphed in the timeline from a list in a text file')
+       print('   -filter "d1,d2,..." Filter out all but this comma-delimited list of device names')
+       print('   -mincg  ms   Discard all callgraphs shorter than ms milliseconds (e.g. 0.001 for us)')
+       print('   -cgphase P   Only show callgraph data for phase P (e.g. suspend_late)')
+       print('   -cgtest N    Only show callgraph data for test N (e.g. 0 or 1 in an x2 run)')
+       print('   -timeprec N  Number of significant digits in timestamps (0:S, [3:ms], 6:us)')
        print('  [utilities]')
-       print('    -fpdt       Print out the contents of the ACPI Firmware Performance Data Table')
-       print('    -usbtopo    Print out the current USB topology with power info')
-       print('    -usbauto    Enable autosuspend for all connected USB devices')
+       print('   -fpdt        Print out the contents of the ACPI Firmware Performance Data Table')
+       print('   -usbtopo     Print out the current USB topology with power info')
+       print('   -usbauto     Enable autosuspend for all connected USB devices')
        print('  [re-analyze data from previous runs]')
-       print('    -ftrace ftracefile  Create HTML output using ftrace input')
-       print('    -dmesg dmesgfile    Create HTML output using dmesg (not needed for kernel >= 3.15)')
-       print('    -summary directory  Create a summary of all test in this dir')
+       print('   -ftrace ftracefile  Create HTML output using ftrace input')
+       print('   -dmesg dmesgfile    Create HTML output using dmesg (not needed for kernel >= 3.15)')
+       print('   -summary directory  Create a summary of all test in this dir')
        print('')
        return True
 
@@ -4739,26 +5060,22 @@ if __name__ == '__main__':
                        sys.exit()
                elif(arg == '-x2'):
                        sysvals.execcount = 2
-                       if(sysvals.usecallgraph):
-                               doError('-x2 is not compatible with -f', False)
                elif(arg == '-x2delay'):
                        sysvals.x2delay = getArgInt('-x2delay', args, 0, 60000)
-               elif(arg == '-postres'):
-                       sysvals.postresumetime = getArgInt('-postres', args, 0, 3600)
+               elif(arg == '-predelay'):
+                       sysvals.predelay = getArgInt('-predelay', args, 0, 60000)
+               elif(arg == '-postdelay'):
+                       sysvals.postdelay = getArgInt('-postdelay', args, 0, 60000)
                elif(arg == '-f'):
                        sysvals.usecallgraph = True
-                       if(sysvals.execcount > 1):
-                               doError('-x2 is not compatible with -f', False)
-                       if(sysvals.usedevsrc):
-                               doError('-dev is not compatible with -f', False)
                elif(arg == '-addlogs'):
                        sysvals.addlogs = True
                elif(arg == '-verbose'):
                        sysvals.verbose = True
+               elif(arg == '-proc'):
+                       sysvals.useprocmon = True
                elif(arg == '-dev'):
                        sysvals.usedevsrc = True
-                       if(sysvals.usecallgraph):
-                               doError('-dev is not compatible with -f', False)
                elif(arg == '-rtcwake'):
                        sysvals.rtcwake = True
                        sysvals.rtcwaketime = getArgInt('-rtcwake', args, 0, 3600)
@@ -4768,6 +5085,21 @@ if __name__ == '__main__':
                        sysvals.mindevlen = getArgFloat('-mindev', args, 0.0, 10000.0)
                elif(arg == '-mincg'):
                        sysvals.mincglen = getArgFloat('-mincg', args, 0.0, 10000.0)
+               elif(arg == '-cgtest'):
+                       sysvals.cgtest = getArgInt('-cgtest', args, 0, 1)
+               elif(arg == '-cgphase'):
+                       try:
+                               val = args.next()
+                       except:
+                               doError('No phase name supplied', True)
+                       d = Data(0)
+                       if val not in d.phases:
+                               doError('Invalid phase, valid phaess are %s' % d.phases, True)
+                       sysvals.cgphase = val
+               elif(arg == '-callloop-maxgap'):
+                       sysvals.callloopmaxgap = getArgFloat('-callloop-maxgap', args, 0.0, 1.0)
+               elif(arg == '-callloop-maxlen'):
+                       sysvals.callloopmaxlen = getArgFloat('-callloop-maxlen', args, 0.0, 1.0)
                elif(arg == '-cmd'):
                        try:
                                val = args.next()
@@ -4788,14 +5120,14 @@ if __name__ == '__main__':
                                val = args.next()
                        except:
                                doError('No subdirectory name supplied', True)
-                       sysvals.outdir = val
+                       sysvals.setOutputFolder(val)
                elif(arg == '-config'):
                        try:
                                val = args.next()
                        except:
                                doError('No text file supplied', True)
                        if(os.path.exists(val) == False):
-                               doError('%s does not exist' % val, False)
+                               doError('%s does not exist' % val)
                        configFromFile(val)
                elif(arg == '-fadd'):
                        try:
@@ -4803,7 +5135,7 @@ if __name__ == '__main__':
                        except:
                                doError('No text file supplied', True)
                        if(os.path.exists(val) == False):
-                               doError('%s does not exist' % val, False)
+                               doError('%s does not exist' % val)
                        sysvals.addFtraceFilterFunctions(val)
                elif(arg == '-dmesg'):
                        try:
@@ -4813,7 +5145,7 @@ if __name__ == '__main__':
                        sysvals.notestrun = True
                        sysvals.dmesgfile = val
                        if(os.path.exists(sysvals.dmesgfile) == False):
-                               doError('%s does not exist' % sysvals.dmesgfile, False)
+                               doError('%s does not exist' % sysvals.dmesgfile)
                elif(arg == '-ftrace'):
                        try:
                                val = args.next()
@@ -4822,7 +5154,7 @@ if __name__ == '__main__':
                        sysvals.notestrun = True
                        sysvals.ftracefile = val
                        if(os.path.exists(sysvals.ftracefile) == False):
-                               doError('%s does not exist' % sysvals.ftracefile, False)
+                               doError('%s does not exist' % sysvals.ftracefile)
                elif(arg == '-summary'):
                        try:
                                val = args.next()
@@ -4832,7 +5164,7 @@ if __name__ == '__main__':
                        cmdarg = val
                        sysvals.notestrun = True
                        if(os.path.isdir(val) == False):
-                               doError('%s is not accesible' % val, False)
+                               doError('%s is not accesible' % val)
                elif(arg == '-filter'):
                        try:
                                val = args.next()
@@ -4842,6 +5174,12 @@ if __name__ == '__main__':
                else:
                        doError('Invalid argument: '+arg, True)
 
+       # compatibility errors
+       if(sysvals.usecallgraph and sysvals.usedevsrc):
+               doError('-dev is not compatible with -f')
+       if(sysvals.usecallgraph and sysvals.useprocmon):
+               doError('-proc is not compatible with -f')
+
        # callgraph size cannot exceed device size
        if sysvals.mincglen < sysvals.mindevlen:
                sysvals.mincglen = sysvals.mindevlen
@@ -4855,8 +5193,7 @@ if __name__ == '__main__':
                elif(cmd == 'usbtopo'):
                        detectUSB()
                elif(cmd == 'modes'):
-                       modes = getModes()
-                       print modes
+                       print getModes()
                elif(cmd == 'flist'):
                        sysvals.getFtraceFilterFunctions(True)
                elif(cmd == 'flistall'):
index 06121ce524a76006072459d352d727b4aebdf203..c9235d8340f1e7ba33eacfaee94642c18f5fd211 100644 (file)
@@ -44,7 +44,7 @@ char *cur_filename, *source_file;
 int in_source_file;
 
 static int flag_debug, flag_dump_defs, flag_reference, flag_dump_types,
-          flag_preserve, flag_warnings;
+          flag_preserve, flag_warnings, flag_rel_crcs;
 static const char *mod_prefix = "";
 
 static int errors;
@@ -693,7 +693,10 @@ void export_symbol(const char *name)
                        fputs(">\n", debugfile);
 
                /* Used as a linker script. */
-               printf("%s__crc_%s = 0x%08lx ;\n", mod_prefix, name, crc);
+               printf(!flag_rel_crcs ? "%s__crc_%s = 0x%08lx;\n" :
+                      "SECTIONS { .rodata : ALIGN(4) { "
+                      "%s__crc_%s = .; LONG(0x%08lx); } }\n",
+                      mod_prefix, name, crc);
        }
 }
 
@@ -730,7 +733,7 @@ void error_with_pos(const char *fmt, ...)
 
 static void genksyms_usage(void)
 {
-       fputs("Usage:\n" "genksyms [-adDTwqhV] > /path/to/.tmp_obj.ver\n" "\n"
+       fputs("Usage:\n" "genksyms [-adDTwqhVR] > /path/to/.tmp_obj.ver\n" "\n"
 #ifdef __GNU_LIBRARY__
              "  -s, --symbol-prefix   Select symbol prefix\n"
              "  -d, --debug           Increment the debug level (repeatable)\n"
@@ -742,6 +745,7 @@ static void genksyms_usage(void)
              "  -q, --quiet           Disable warnings (default)\n"
              "  -h, --help            Print this message\n"
              "  -V, --version         Print the release version\n"
+             "  -R, --relative-crc    Emit section relative symbol CRCs\n"
 #else                          /* __GNU_LIBRARY__ */
              "  -s                    Select symbol prefix\n"
              "  -d                    Increment the debug level (repeatable)\n"
@@ -753,6 +757,7 @@ static void genksyms_usage(void)
              "  -q                    Disable warnings (default)\n"
              "  -h                    Print this message\n"
              "  -V                    Print the release version\n"
+             "  -R                    Emit section relative symbol CRCs\n"
 #endif                         /* __GNU_LIBRARY__ */
              , stderr);
 }
@@ -774,13 +779,14 @@ int main(int argc, char **argv)
                {"preserve", 0, 0, 'p'},
                {"version", 0, 0, 'V'},
                {"help", 0, 0, 'h'},
+               {"relative-crc", 0, 0, 'R'},
                {0, 0, 0, 0}
        };
 
-       while ((o = getopt_long(argc, argv, "s:dwqVDr:T:ph",
+       while ((o = getopt_long(argc, argv, "s:dwqVDr:T:phR",
                                &long_opts[0], NULL)) != EOF)
 #else                          /* __GNU_LIBRARY__ */
-       while ((o = getopt(argc, argv, "s:dwqVDr:T:ph")) != EOF)
+       while ((o = getopt(argc, argv, "s:dwqVDr:T:phR")) != EOF)
 #endif                         /* __GNU_LIBRARY__ */
                switch (o) {
                case 's':
@@ -823,6 +829,9 @@ int main(int argc, char **argv)
                case 'h':
                        genksyms_usage();
                        return 0;
+               case 'R':
+                       flag_rel_crcs = 1;
+                       break;
                default:
                        genksyms_usage();
                        return 1;
index 299b92ca1ae092d82e9a0e3bffaec45988ebcc37..5d554419170b7d54ec82ddb1d31093d3eab0aa7d 100644 (file)
@@ -219,6 +219,10 @@ static int symbol_valid(struct sym_entry *s)
                "_SDA2_BASE_",          /* ppc */
                NULL };
 
+       static char *special_prefixes[] = {
+               "__crc_",               /* modversions */
+               NULL };
+
        static char *special_suffixes[] = {
                "_veneer",              /* arm */
                "_from_arm",            /* arm */
@@ -259,6 +263,14 @@ static int symbol_valid(struct sym_entry *s)
                if (strcmp(sym_name, special_symbols[i]) == 0)
                        return 0;
 
+       for (i = 0; special_prefixes[i]; i++) {
+               int l = strlen(special_prefixes[i]);
+
+               if (l <= strlen(sym_name) &&
+                   strncmp(sym_name, special_prefixes[i], l) == 0)
+                       return 0;
+       }
+
        for (i = 0; special_suffixes[i]; i++) {
                int l = strlen(sym_name) - strlen(special_suffixes[i]);
 
index 29c89a6bad3d3ac34e539189e83769f1c63ddab3..4dedd0d3d3a7fda58af2bc6150b9f6b6195d2cac 100644 (file)
@@ -621,6 +621,16 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
        if (strncmp(symname, CRC_PFX, strlen(CRC_PFX)) == 0) {
                is_crc = true;
                crc = (unsigned int) sym->st_value;
+               if (sym->st_shndx != SHN_UNDEF && sym->st_shndx != SHN_ABS) {
+                       unsigned int *crcp;
+
+                       /* symbol points to the CRC in the ELF object */
+                       crcp = (void *)info->hdr + sym->st_value +
+                              info->sechdrs[sym->st_shndx].sh_offset -
+                              (info->hdr->e_type != ET_REL ?
+                               info->sechdrs[sym->st_shndx].sh_addr : 0);
+                       crc = *crcp;
+               }
                sym_update_crc(symname + strlen(CRC_PFX), mod, crc,
                                export);
        }
index 5d721e990876d8b282867523283e4acda7c22930..f067be814626af5055475cec73dabbce2081f326 100644 (file)
@@ -78,12 +78,6 @@ static inline void *kvzalloc(size_t size)
        return __aa_kvmalloc(size, __GFP_ZERO);
 }
 
-/* returns 0 if kref not incremented */
-static inline int kref_get_not0(struct kref *kref)
-{
-       return atomic_inc_not_zero(&kref->refcount);
-}
-
 /**
  * aa_strneq - compare null terminated @str to a non null terminated substring
  * @str: a null terminated string
index 52275f040a5f685e4c20b4d829d47c4c8223644f..46467aaa557b4b0c8d80b11e07d845d867009ba4 100644 (file)
@@ -287,7 +287,7 @@ static inline struct aa_profile *aa_get_profile(struct aa_profile *p)
  */
 static inline struct aa_profile *aa_get_profile_not0(struct aa_profile *p)
 {
-       if (p && kref_get_not0(&p->count))
+       if (p && kref_get_unless_zero(&p->count))
                return p;
 
        return NULL;
@@ -307,7 +307,7 @@ static inline struct aa_profile *aa_get_profile_rcu(struct aa_profile __rcu **p)
        rcu_read_lock();
        do {
                c = rcu_dereference(*p);
-       } while (c && !kref_get_not0(&c->count));
+       } while (c && !kref_get_unless_zero(&c->count));
        rcu_read_unlock();
 
        return c;
index c7c6619431d5fb4922dd729e9e49dd910d7967d2..d98550abe16d40250be4327197c48866953b5645 100644 (file)
@@ -5887,7 +5887,7 @@ static int selinux_setprocattr(struct task_struct *p,
                return error;
 
        /* Obtain a SID for the context, if one was specified. */
-       if (size && str[1] && str[1] != '\n') {
+       if (size && str[0] && str[0] != '\n') {
                if (str[size-1] == '\n') {
                        str[size-1] = 0;
                        size--;
index c850345c43b53dd5616b155f34f741d0ca30701c..dfa5156f35856324d86f05315edfb2ef9cf4e74e 100644 (file)
@@ -419,7 +419,6 @@ int snd_seq_pool_done(struct snd_seq_pool *pool)
 {
        unsigned long flags;
        struct snd_seq_event_cell *ptr;
-       int max_count = 5 * HZ;
 
        if (snd_BUG_ON(!pool))
                return -EINVAL;
@@ -432,14 +431,8 @@ int snd_seq_pool_done(struct snd_seq_pool *pool)
        if (waitqueue_active(&pool->output_sleep))
                wake_up(&pool->output_sleep);
 
-       while (atomic_read(&pool->counter) > 0) {
-               if (max_count == 0) {
-                       pr_warn("ALSA: snd_seq_pool_done timeout: %d cells remain\n", atomic_read(&pool->counter));
-                       break;
-               }
+       while (atomic_read(&pool->counter) > 0)
                schedule_timeout_uninterruptible(1);
-               max_count--;
-       }
        
        /* release all resources */
        spin_lock_irqsave(&pool->lock, flags);
index 0bec02e89d5118b3dffe1e22e88e423baa037491..450c5187eecb6bb083736d2d2a1aad43b98c7c3f 100644 (file)
@@ -181,6 +181,8 @@ void __exit snd_seq_queues_delete(void)
        }
 }
 
+static void queue_use(struct snd_seq_queue *queue, int client, int use);
+
 /* allocate a new queue -
  * return queue index value or negative value for error
  */
@@ -192,11 +194,11 @@ int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
        if (q == NULL)
                return -ENOMEM;
        q->info_flags = info_flags;
+       queue_use(q, client, 1);
        if (queue_list_add(q) < 0) {
                queue_delete(q);
                return -ENOMEM;
        }
-       snd_seq_queue_use(q->queue, client, 1); /* use this queue */
        return q->queue;
 }
 
@@ -502,19 +504,9 @@ int snd_seq_queue_timer_set_tempo(int queueid, int client,
        return result;
 }
 
-
-/* use or unuse this queue -
- * if it is the first client, starts the timer.
- * if it is not longer used by any clients, stop the timer.
- */
-int snd_seq_queue_use(int queueid, int client, int use)
+/* use or unuse this queue */
+static void queue_use(struct snd_seq_queue *queue, int client, int use)
 {
-       struct snd_seq_queue *queue;
-
-       queue = queueptr(queueid);
-       if (queue == NULL)
-               return -EINVAL;
-       mutex_lock(&queue->timer_mutex);
        if (use) {
                if (!test_and_set_bit(client, queue->clients_bitmap))
                        queue->clients++;
@@ -529,6 +521,21 @@ int snd_seq_queue_use(int queueid, int client, int use)
        } else {
                snd_seq_timer_close(queue);
        }
+}
+
+/* use or unuse this queue -
+ * if it is the first client, starts the timer.
+ * if it is not longer used by any clients, stop the timer.
+ */
+int snd_seq_queue_use(int queueid, int client, int use)
+{
+       struct snd_seq_queue *queue;
+
+       queue = queueptr(queueid);
+       if (queue == NULL)
+               return -EINVAL;
+       mutex_lock(&queue->timer_mutex);
+       queue_use(queue, client, use);
        mutex_unlock(&queue->timer_mutex);
        queuefree(queue);
        return 0;
index cf9bc042fe966361588b8dc92b66e308fd50e657..3fc201c3b95a33380217950d91285c83d3d7fb8a 100644 (file)
@@ -3639,6 +3639,7 @@ HDA_CODEC_ENTRY(0x10de0070, "GPU 70 HDMI/DP",     patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de0071, "GPU 71 HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de0072, "GPU 72 HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de007d, "GPU 7d HDMI/DP",  patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de0080, "GPU 80 HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de0082, "GPU 82 HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de0083, "GPU 83 HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI",      patch_nvhdmi_2ch),
index 90009c0b3a92e42f2598e05b451a0cbc0d79c8bf..ab3c280a23d1fa430c39f55816296cf453ad2613 100644 (file)
@@ -754,8 +754,9 @@ int line6_probe(struct usb_interface *interface,
                goto error;
        }
 
+       line6_get_interval(line6);
+
        if (properties->capabilities & LINE6_CAP_CONTROL) {
-               line6_get_interval(line6);
                ret = line6_init_cap_control(line6);
                if (ret < 0)
                        goto error;
index a2b3eb313a25ccb2284edfc8ef2021c3e646c786..af05f8e0903e27baccddc00748bf0f75820c9d3d 100644 (file)
@@ -84,6 +84,15 @@ struct kvm_regs {
 #define KVM_VGIC_V2_DIST_SIZE          0x1000
 #define KVM_VGIC_V2_CPU_SIZE           0x2000
 
+/* Supported VGICv3 address types  */
+#define KVM_VGIC_V3_ADDR_TYPE_DIST     2
+#define KVM_VGIC_V3_ADDR_TYPE_REDIST   3
+#define KVM_VGIC_ITS_ADDR_TYPE         4
+
+#define KVM_VGIC_V3_DIST_SIZE          SZ_64K
+#define KVM_VGIC_V3_REDIST_SIZE                (2 * SZ_64K)
+#define KVM_VGIC_V3_ITS_SIZE           (2 * SZ_64K)
+
 #define KVM_ARM_VCPU_POWER_OFF         0 /* CPU is started in OFF state */
 #define KVM_ARM_VCPU_PSCI_0_2          1 /* CPU uses PSCI v0.2 */
 
index c93cf35ce379550e37bcfe1d127def78af7de11c..3603b6f51b11beae38c1c1645086cafc807b07ab 100644 (file)
@@ -573,6 +573,10 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_SPRG9      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xba)
 #define KVM_REG_PPC_DBSR       (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbb)
 
+/* POWER9 registers */
+#define KVM_REG_PPC_TIDR       (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbc)
+#define KVM_REG_PPC_PSSCR      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd)
+
 /* Transactional Memory checkpointed state:
  * This is all GPRs, all VSX regs and a subset of SPRs
  */
@@ -596,6 +600,7 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_TM_VSCR    (KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67)
 #define KVM_REG_PPC_TM_DSCR    (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68)
 #define KVM_REG_PPC_TM_TAR     (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69)
+#define KVM_REG_PPC_TM_XER     (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x6a)
 
 /* PPC64 eXternal Interrupt Controller Specification */
 #define KVM_DEV_XICS_GRP_SOURCES       1       /* 64-bit source attributes */
index cddd5d06e1cb13b16d9ca93f2f38c1a48f022b84..293149a1c6a114999473a78199a186a5cf040d22 100644 (file)
 #define X86_FEATURE_AMD_DCM     ( 3*32+27) /* multi-node processor */
 #define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
 #define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
+#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3       ( 4*32+ 0) /* "pni" SSE-3 */
 
 #define X86_FEATURE_CPB                ( 7*32+ 2) /* AMD Core Performance Boost */
 #define X86_FEATURE_EPB                ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
+#define X86_FEATURE_CAT_L3     ( 7*32+ 4) /* Cache Allocation Technology L3 */
+#define X86_FEATURE_CAT_L2     ( 7*32+ 5) /* Cache Allocation Technology L2 */
+#define X86_FEATURE_CDP_L3     ( 7*32+ 6) /* Code and Data Prioritization L3 */
 
 #define X86_FEATURE_HW_PSTATE  ( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
 
+#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
 #define X86_FEATURE_INTEL_PT   ( 7*32+15) /* Intel Processor Trace */
 #define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
 #define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
 #define X86_FEATURE_RTM                ( 9*32+11) /* Restricted Transactional Memory */
 #define X86_FEATURE_CQM                ( 9*32+12) /* Cache QoS Monitoring */
 #define X86_FEATURE_MPX                ( 9*32+14) /* Memory Protection Extension */
+#define X86_FEATURE_RDT_A      ( 9*32+15) /* Resource Director Technology Allocation */
 #define X86_FEATURE_AVX512F    ( 9*32+16) /* AVX-512 Foundation */
 #define X86_FEATURE_AVX512DQ   ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
 #define X86_FEATURE_RDSEED     ( 9*32+18) /* The RDSEED instruction */
 #define X86_FEATURE_ADX                ( 9*32+19) /* The ADCX and ADOX instructions */
 #define X86_FEATURE_SMAP       ( 9*32+20) /* Supervisor Mode Access Prevention */
+#define X86_FEATURE_AVX512IFMA  ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
 #define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
 #define X86_FEATURE_CLWB       ( 9*32+24) /* CLWB instruction */
 #define X86_FEATURE_AVX512PF   ( 9*32+26) /* AVX-512 Prefetch */
 #define X86_FEATURE_AVIC       (15*32+13) /* Virtual Interrupt Controller */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
+#define X86_FEATURE_AVX512VBMI  (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
 #define X86_FEATURE_PKU                (16*32+ 3) /* Protection Keys for Userspace */
 #define X86_FEATURE_OSPKE      (16*32+ 4) /* OS Protection Keys Enable */
+#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
+#define X86_FEATURE_RDPID      (16*32+ 22) /* RDPID instruction */
 
 /* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */
 #define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */
 #define X86_BUG_NULL_SEG       X86_BUG(10) /* Nulling a selector preserves the base */
 #define X86_BUG_SWAPGS_FENCE   X86_BUG(11) /* SWAPGS without input dep on GS */
 #define X86_BUG_MONITOR                X86_BUG(12) /* IPI required to wake up remote CPU */
+#define X86_BUG_AMD_E400       X86_BUG(13) /* CPU is among the affected by Erratum 400 */
+
 #endif /* _ASM_X86_CPUFEATURES_H */
index 37fee272618f1de348a7d5961f1792debba72991..14458658e988bb6c9333e73701ea55e7b6525a92 100644 (file)
@@ -65,6 +65,8 @@
 #define EXIT_REASON_TPR_BELOW_THRESHOLD 43
 #define EXIT_REASON_APIC_ACCESS         44
 #define EXIT_REASON_EOI_INDUCED         45
+#define EXIT_REASON_GDTR_IDTR           46
+#define EXIT_REASON_LDTR_TR             47
 #define EXIT_REASON_EPT_VIOLATION       48
 #define EXIT_REASON_EPT_MISCONFIG       49
 #define EXIT_REASON_INVEPT              50
        { EXIT_REASON_MCE_DURING_VMENTRY,    "MCE_DURING_VMENTRY" }, \
        { EXIT_REASON_TPR_BELOW_THRESHOLD,   "TPR_BELOW_THRESHOLD" }, \
        { EXIT_REASON_APIC_ACCESS,           "APIC_ACCESS" }, \
+       { EXIT_REASON_GDTR_IDTR,             "GDTR_IDTR" }, \
+       { EXIT_REASON_LDTR_TR,               "LDTR_TR" }, \
        { EXIT_REASON_EPT_VIOLATION,         "EPT_VIOLATION" }, \
        { EXIT_REASON_EPT_MISCONFIG,         "EPT_MISCONFIG" }, \
        { EXIT_REASON_INVEPT,                "INVEPT" }, \
        { EXIT_REASON_XRSTORS,               "XRSTORS" }
 
 #define VMX_ABORT_SAVE_GUEST_MSR_FAIL        1
+#define VMX_ABORT_LOAD_HOST_PDPTE_FAIL       2
 #define VMX_ABORT_LOAD_HOST_MSR_FAIL         4
 
 #endif /* _UAPIVMX_H */
index 99c0ccd2f176592c03140d8e92671ba78aeac8e0..e279a71c650d2cdc7ca4f9e7c07f05a79e11b2eb 100644 (file)
@@ -19,6 +19,16 @@ else
   Q=@
 endif
 
+ifneq ($(filter 4.%,$(MAKE_VERSION)),) # make-4
+ifneq ($(filter %s ,$(firstword x$(MAKEFLAGS))),)
+  quiet=silent_
+endif
+else                                   # make-3.8x
+ifneq ($(filter s% -s%,$(MAKEFLAGS)),)
+  quiet=silent_
+endif
+endif
+
 build-dir := $(srctree)/tools/build
 
 # Define $(fixdep) for dep-cmd function
diff --git a/tools/include/linux/compiler-gcc.h b/tools/include/linux/compiler-gcc.h
new file mode 100644 (file)
index 0000000..48af2f1
--- /dev/null
@@ -0,0 +1,14 @@
+#ifndef _TOOLS_LINUX_COMPILER_H_
+#error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead."
+#endif
+
+/*
+ * Common definitions for all gcc versions go here.
+ */
+#define GCC_VERSION (__GNUC__ * 10000          \
+                    + __GNUC_MINOR__ * 100     \
+                    + __GNUC_PATCHLEVEL__)
+
+#if GCC_VERSION >= 70000 && !defined(__CHECKER__)
+# define __fallthrough __attribute__ ((fallthrough))
+#endif
index e33fc1df3935e3164280fab29ca0a34908ae5b16..6326ede9aecef7f6b417f67990894032e79b3f4a 100644 (file)
@@ -1,6 +1,10 @@
 #ifndef _TOOLS_LINUX_COMPILER_H_
 #define _TOOLS_LINUX_COMPILER_H_
 
+#ifdef __GNUC__
+#include <linux/compiler-gcc.h>
+#endif
+
 /* Optimization barrier */
 /* The "volatile" is due to gcc bugs */
 #define barrier() __asm__ __volatile__("": : :"memory")
@@ -126,4 +130,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 #define WRITE_ONCE(x, val) \
        ({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
 
+
+#ifndef __fallthrough
+# define __fallthrough
+#endif
+
 #endif /* _TOOLS_LINUX_COMPILER_H */
index 0eb0e87dbe9f511672102f2123129328288a9159..d2b0ac799d03c925a6eec2b49bdb14525331687c 100644 (file)
@@ -116,6 +116,12 @@ enum bpf_attach_type {
 
 #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
 
+/* If BPF_F_ALLOW_OVERRIDE flag is used in BPF_PROG_ATTACH command
+ * to the given target_fd cgroup the descendent cgroup will be able to
+ * override effective bpf program that was inherited from this cgroup
+ */
+#define BPF_F_ALLOW_OVERRIDE   (1U << 0)
+
 #define BPF_PSEUDO_MAP_FD      1
 
 /* flags for BPF_MAP_UPDATE_ELEM command */
@@ -171,6 +177,7 @@ union bpf_attr {
                __u32           target_fd;      /* container object to attach to */
                __u32           attach_bpf_fd;  /* eBPF program to attach */
                __u32           attach_type;
+               __u32           attach_flags;
        };
 } __attribute__((aligned(8)));
 
index c03a79ebf9c8dbf3117be2b716711f1111845911..078b666fd78b8ad9b2cddc32d5975e0d8ae533d8 100644 (file)
@@ -3,11 +3,11 @@
 CC = $(CROSS_COMPILE)gcc
 CFLAGS = -Wall -Wextra -g -I../../include/uapi
 
-all: uledmon
+all: uledmon led_hw_brightness_mon
 %: %.c
        $(CC) $(CFLAGS) -o $@ $^
 
 clean:
-       $(RM) uledmon
+       $(RM) uledmon led_hw_brightness_mon
 
 .PHONY: all clean
diff --git a/tools/leds/led_hw_brightness_mon.c b/tools/leds/led_hw_brightness_mon.c
new file mode 100644 (file)
index 0000000..64642cc
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * led_hw_brightness_mon.c
+ *
+ * This program monitors LED brightness level changes having its origin
+ * in hardware/firmware, i.e. outside of kernel control.
+ * A timestamp and brightness value is printed each time the brightness changes.
+ *
+ * Usage: led_hw_brightness_mon <device-name>
+ *
+ * <device-name> is the name of the LED class device to be monitored. Pressing
+ * CTRL+C will exit.
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+
+#include <linux/uleds.h>
+
+int main(int argc, char const *argv[])
+{
+       int fd, ret;
+       char brightness_file_path[LED_MAX_NAME_SIZE + 11];
+       struct pollfd pollfd;
+       struct timespec ts;
+       char buf[11];
+
+       if (argc != 2) {
+               fprintf(stderr, "Requires <device-name> argument\n");
+               return 1;
+       }
+
+       snprintf(brightness_file_path, LED_MAX_NAME_SIZE,
+                "/sys/class/leds/%s/brightness_hw_changed", argv[1]);
+
+       fd = open(brightness_file_path, O_RDONLY);
+       if (fd == -1) {
+               printf("Failed to open %s file\n", brightness_file_path);
+               return 1;
+       }
+
+       /*
+        * read may fail if no hw brightness change has occurred so far,
+        * but it is required to avoid spurious poll notifications in
+        * the opposite case.
+        */
+       read(fd, buf, sizeof(buf));
+
+       pollfd.fd = fd;
+       pollfd.events = POLLPRI;
+
+       while (1) {
+               ret = poll(&pollfd, 1, -1);
+               if (ret == -1) {
+                       printf("Failed to poll %s file (%d)\n",
+                               brightness_file_path, ret);
+                       ret = 1;
+                       break;
+               }
+
+               clock_gettime(CLOCK_MONOTONIC, &ts);
+
+               ret = read(fd, buf, sizeof(buf));
+               if (ret < 0)
+                       break;
+
+               ret = lseek(pollfd.fd, 0, SEEK_SET);
+               if (ret < 0) {
+                       printf("lseek failed (%d)\n", ret);
+                       break;
+               }
+
+               printf("[%ld.%09ld] %d\n", ts.tv_sec, ts.tv_nsec, atoi(buf));
+       }
+
+       close(fd);
+
+       return ret;
+}
index adba83b325d556d4b9280114905cdd3fb4b7313a..eb6e0b36bfc194771c15d66c5fe257d73cdf92de 100644 (file)
@@ -17,7 +17,13 @@ MAKEFLAGS += --no-print-directory
 LIBFILE = $(OUTPUT)libapi.a
 
 CFLAGS := $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
-CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -O6 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fPIC
+CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fPIC
+
+ifeq ($(CC), clang)
+  CFLAGS += -O3
+else
+  CFLAGS += -O6
+endif
 
 # Treat warnings as errors unless directed not to
 ifneq ($(WERROR),0)
index f99f49e4a31e65faba6bad4b1c82b99a01de7ded..4b6bfc43cccf0e096c3c88e272afa1491fa69e97 100644 (file)
 #define HUGETLBFS_MAGIC        0x958458f6
 #endif
 
+#ifndef BPF_FS_MAGIC
+#define BPF_FS_MAGIC           0xcafe4a11
+#endif
+
 static const char * const sysfs__fs_known_mountpoints[] = {
        "/sys",
        0,
@@ -75,6 +79,11 @@ static const char * const hugetlbfs__known_mountpoints[] = {
        0,
 };
 
+static const char * const bpf_fs__known_mountpoints[] = {
+       "/sys/fs/bpf",
+       0,
+};
+
 struct fs {
        const char              *name;
        const char * const      *mounts;
@@ -89,6 +98,7 @@ enum {
        FS__DEBUGFS = 2,
        FS__TRACEFS = 3,
        FS__HUGETLBFS = 4,
+       FS__BPF_FS = 5,
 };
 
 #ifndef TRACEFS_MAGIC
@@ -121,6 +131,11 @@ static struct fs fs__entries[] = {
                .mounts = hugetlbfs__known_mountpoints,
                .magic  = HUGETLBFS_MAGIC,
        },
+       [FS__BPF_FS] = {
+               .name   = "bpf",
+               .mounts = bpf_fs__known_mountpoints,
+               .magic  = BPF_FS_MAGIC,
+       },
 };
 
 static bool fs__read_mounts(struct fs *fs)
@@ -280,6 +295,7 @@ FS(procfs,  FS__PROCFS);
 FS(debugfs, FS__DEBUGFS);
 FS(tracefs, FS__TRACEFS);
 FS(hugetlbfs, FS__HUGETLBFS);
+FS(bpf_fs, FS__BPF_FS);
 
 int filename__read_int(const char *filename, int *value)
 {
index a63269f5d20cbe5b9c794f64ad64bf7a16d0ce7d..6b332dc74498fc35a1f26b0488391e12435eb269 100644 (file)
@@ -22,6 +22,7 @@ FS(procfs)
 FS(debugfs)
 FS(tracefs)
 FS(hugetlbfs)
+FS(bpf_fs)
 
 #undef FS
 
index 251b7c342a87d1d78cbef0cebe2e6aafbb10588d..3e606b9c443efb77e75842cb80cfd10f92dae5bc 100644 (file)
@@ -86,9 +86,13 @@ void put_tracing_file(char *file)
        free(file);
 }
 
-static int strerror_open(int err, char *buf, size_t size, const char *filename)
+int tracing_path__strerror_open_tp(int err, char *buf, size_t size,
+                                  const char *sys, const char *name)
 {
        char sbuf[128];
+       char filename[PATH_MAX];
+
+       snprintf(filename, PATH_MAX, "%s/%s", sys, name ?: "*");
 
        switch (err) {
        case ENOENT:
@@ -99,10 +103,19 @@ static int strerror_open(int err, char *buf, size_t size, const char *filename)
                 * - jirka
                 */
                if (debugfs__configured() || tracefs__configured()) {
-                       snprintf(buf, size,
-                                "Error:\tFile %s/%s not found.\n"
-                                "Hint:\tPerhaps this kernel misses some CONFIG_ setting to enable this feature?.\n",
-                                tracing_events_path, filename);
+                       /* sdt markers */
+                       if (!strncmp(filename, "sdt_", 4)) {
+                               snprintf(buf, size,
+                                       "Error:\tFile %s/%s not found.\n"
+                                       "Hint:\tSDT event cannot be directly recorded on.\n"
+                                       "\tPlease first use 'perf probe %s:%s' before recording it.\n",
+                                       tracing_events_path, filename, sys, name);
+                       } else {
+                               snprintf(buf, size,
+                                        "Error:\tFile %s/%s not found.\n"
+                                        "Hint:\tPerhaps this kernel misses some CONFIG_ setting to enable this feature?.\n",
+                                        tracing_events_path, filename);
+                       }
                        break;
                }
                snprintf(buf, size, "%s",
@@ -125,12 +138,3 @@ static int strerror_open(int err, char *buf, size_t size, const char *filename)
 
        return 0;
 }
-
-int tracing_path__strerror_open_tp(int err, char *buf, size_t size, const char *sys, const char *name)
-{
-       char path[PATH_MAX];
-
-       snprintf(path, PATH_MAX, "%s/%s", sys, name ?: "*");
-
-       return strerror_open(err, buf, size, path);
-}
index 3ddb58a36d3c2534ce6207581a4fee1730c6e6a0..ae752fa4eaa741989040d982772740691486a1e3 100644 (file)
@@ -168,7 +168,8 @@ int bpf_obj_get(const char *pathname)
        return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
 }
 
-int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type)
+int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
+                   unsigned int flags)
 {
        union bpf_attr attr;
 
@@ -176,6 +177,7 @@ int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type)
        attr.target_fd     = target_fd;
        attr.attach_bpf_fd = prog_fd;
        attr.attach_type   = type;
+       attr.attach_flags  = flags;
 
        return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
 }
index a2f9853dd88259d810e6506ec5ac48d863bc074b..44fb7c5f8ae64cd8017e3365a1663ac7093a41fd 100644 (file)
@@ -22,6 +22,7 @@
 #define __BPF_BPF_H
 
 #include <linux/bpf.h>
+#include <stddef.h>
 
 int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size,
                   int max_entries, __u32 map_flags);
@@ -41,7 +42,8 @@ int bpf_map_delete_elem(int fd, void *key);
 int bpf_map_get_next_key(int fd, void *key, void *next_key);
 int bpf_obj_pin(int fd, const char *pathname);
 int bpf_obj_get(const char *pathname);
-int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type);
+int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type,
+                   unsigned int flags);
 int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
 
 
index 84e6b35da4bd7da84022a1d914f765c5f7d8e3c6..ac6eb863b2a40df00c4ead9f48c872ab67949c19 100644 (file)
@@ -4,6 +4,7 @@
  * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
  * Copyright (C) 2015 Huawei Inc.
+ * Copyright (C) 2017 Nicira, Inc.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
 #include <stdlib.h>
 #include <stdio.h>
 #include <stdarg.h>
+#include <libgen.h>
 #include <inttypes.h>
 #include <string.h>
 #include <unistd.h>
 #include <fcntl.h>
 #include <errno.h>
 #include <asm/unistd.h>
+#include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/bpf.h>
 #include <linux/list.h>
+#include <linux/limits.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/vfs.h>
 #include <libelf.h>
 #include <gelf.h>
 
 #define EM_BPF 247
 #endif
 
+#ifndef BPF_FS_MAGIC
+#define BPF_FS_MAGIC           0xcafe4a11
+#endif
+
 #define __printf(a, b) __attribute__((format(printf, a, b)))
 
 __printf(1, 2)
@@ -779,7 +790,7 @@ static int
 bpf_program__collect_reloc(struct bpf_program *prog,
                           size_t nr_maps, GElf_Shdr *shdr,
                           Elf_Data *data, Elf_Data *symbols,
-                          int maps_shndx)
+                          int maps_shndx, struct bpf_map *maps)
 {
        int i, nrels;
 
@@ -829,7 +840,15 @@ bpf_program__collect_reloc(struct bpf_program *prog,
                        return -LIBBPF_ERRNO__RELOC;
                }
 
-               map_idx = sym.st_value / sizeof(struct bpf_map_def);
+               /* TODO: 'maps' is sorted. We can use bsearch to make it faster. */
+               for (map_idx = 0; map_idx < nr_maps; map_idx++) {
+                       if (maps[map_idx].offset == sym.st_value) {
+                               pr_debug("relocation: find map %zd (%s) for insn %u\n",
+                                        map_idx, maps[map_idx].name, insn_idx);
+                               break;
+                       }
+               }
+
                if (map_idx >= nr_maps) {
                        pr_warning("bpf relocation: map_idx %d large than %d\n",
                                   (int)map_idx, (int)nr_maps - 1);
@@ -953,7 +972,8 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
                err = bpf_program__collect_reloc(prog, nr_maps,
                                                 shdr, data,
                                                 obj->efile.symbols,
-                                                obj->efile.maps_shndx);
+                                                obj->efile.maps_shndx,
+                                                obj->maps);
                if (err)
                        return err;
        }
@@ -1227,6 +1247,191 @@ out:
        return err;
 }
 
+static int check_path(const char *path)
+{
+       struct statfs st_fs;
+       char *dname, *dir;
+       int err = 0;
+
+       if (path == NULL)
+               return -EINVAL;
+
+       dname = strdup(path);
+       if (dname == NULL)
+               return -ENOMEM;
+
+       dir = dirname(dname);
+       if (statfs(dir, &st_fs)) {
+               pr_warning("failed to statfs %s: %s\n", dir, strerror(errno));
+               err = -errno;
+       }
+       free(dname);
+
+       if (!err && st_fs.f_type != BPF_FS_MAGIC) {
+               pr_warning("specified path %s is not on BPF FS\n", path);
+               err = -EINVAL;
+       }
+
+       return err;
+}
+
+int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
+                             int instance)
+{
+       int err;
+
+       err = check_path(path);
+       if (err)
+               return err;
+
+       if (prog == NULL) {
+               pr_warning("invalid program pointer\n");
+               return -EINVAL;
+       }
+
+       if (instance < 0 || instance >= prog->instances.nr) {
+               pr_warning("invalid prog instance %d of prog %s (max %d)\n",
+                          instance, prog->section_name, prog->instances.nr);
+               return -EINVAL;
+       }
+
+       if (bpf_obj_pin(prog->instances.fds[instance], path)) {
+               pr_warning("failed to pin program: %s\n", strerror(errno));
+               return -errno;
+       }
+       pr_debug("pinned program '%s'\n", path);
+
+       return 0;
+}
+
+static int make_dir(const char *path)
+{
+       int err = 0;
+
+       if (mkdir(path, 0700) && errno != EEXIST)
+               err = -errno;
+
+       if (err)
+               pr_warning("failed to mkdir %s: %s\n", path, strerror(-err));
+       return err;
+}
+
+int bpf_program__pin(struct bpf_program *prog, const char *path)
+{
+       int i, err;
+
+       err = check_path(path);
+       if (err)
+               return err;
+
+       if (prog == NULL) {
+               pr_warning("invalid program pointer\n");
+               return -EINVAL;
+       }
+
+       if (prog->instances.nr <= 0) {
+               pr_warning("no instances of prog %s to pin\n",
+                          prog->section_name);
+               return -EINVAL;
+       }
+
+       err = make_dir(path);
+       if (err)
+               return err;
+
+       for (i = 0; i < prog->instances.nr; i++) {
+               char buf[PATH_MAX];
+               int len;
+
+               len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
+               if (len < 0)
+                       return -EINVAL;
+               else if (len >= PATH_MAX)
+                       return -ENAMETOOLONG;
+
+               err = bpf_program__pin_instance(prog, buf, i);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+int bpf_map__pin(struct bpf_map *map, const char *path)
+{
+       int err;
+
+       err = check_path(path);
+       if (err)
+               return err;
+
+       if (map == NULL) {
+               pr_warning("invalid map pointer\n");
+               return -EINVAL;
+       }
+
+       if (bpf_obj_pin(map->fd, path)) {
+               pr_warning("failed to pin map: %s\n", strerror(errno));
+               return -errno;
+       }
+
+       pr_debug("pinned map '%s'\n", path);
+       return 0;
+}
+
+int bpf_object__pin(struct bpf_object *obj, const char *path)
+{
+       struct bpf_program *prog;
+       struct bpf_map *map;
+       int err;
+
+       if (!obj)
+               return -ENOENT;
+
+       if (!obj->loaded) {
+               pr_warning("object not yet loaded; load it first\n");
+               return -ENOENT;
+       }
+
+       err = make_dir(path);
+       if (err)
+               return err;
+
+       bpf_map__for_each(map, obj) {
+               char buf[PATH_MAX];
+               int len;
+
+               len = snprintf(buf, PATH_MAX, "%s/%s", path,
+                              bpf_map__name(map));
+               if (len < 0)
+                       return -EINVAL;
+               else if (len >= PATH_MAX)
+                       return -ENAMETOOLONG;
+
+               err = bpf_map__pin(map, buf);
+               if (err)
+                       return err;
+       }
+
+       bpf_object__for_each_program(prog, obj) {
+               char buf[PATH_MAX];
+               int len;
+
+               len = snprintf(buf, PATH_MAX, "%s/%s", path,
+                              prog->section_name);
+               if (len < 0)
+                       return -EINVAL;
+               else if (len >= PATH_MAX)
+                       return -ENAMETOOLONG;
+
+               err = bpf_program__pin(prog, buf);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
 void bpf_object__close(struct bpf_object *obj)
 {
        size_t i;
@@ -1419,37 +1624,33 @@ static void bpf_program__set_type(struct bpf_program *prog,
        prog->type = type;
 }
 
-int bpf_program__set_tracepoint(struct bpf_program *prog)
-{
-       if (!prog)
-               return -EINVAL;
-       bpf_program__set_type(prog, BPF_PROG_TYPE_TRACEPOINT);
-       return 0;
-}
-
-int bpf_program__set_kprobe(struct bpf_program *prog)
-{
-       if (!prog)
-               return -EINVAL;
-       bpf_program__set_type(prog, BPF_PROG_TYPE_KPROBE);
-       return 0;
-}
-
 static bool bpf_program__is_type(struct bpf_program *prog,
                                 enum bpf_prog_type type)
 {
        return prog ? (prog->type == type) : false;
 }
 
-bool bpf_program__is_tracepoint(struct bpf_program *prog)
-{
-       return bpf_program__is_type(prog, BPF_PROG_TYPE_TRACEPOINT);
-}
-
-bool bpf_program__is_kprobe(struct bpf_program *prog)
-{
-       return bpf_program__is_type(prog, BPF_PROG_TYPE_KPROBE);
-}
+#define BPF_PROG_TYPE_FNS(NAME, TYPE)                  \
+int bpf_program__set_##NAME(struct bpf_program *prog)  \
+{                                                      \
+       if (!prog)                                      \
+               return -EINVAL;                         \
+       bpf_program__set_type(prog, TYPE);              \
+       return 0;                                       \
+}                                                      \
+                                                       \
+bool bpf_program__is_##NAME(struct bpf_program *prog)  \
+{                                                      \
+       return bpf_program__is_type(prog, TYPE);        \
+}                                                      \
+
+BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
+BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
+BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
+BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
+BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
+BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
+BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
 
 int bpf_map__fd(struct bpf_map *map)
 {
@@ -1537,3 +1738,10 @@ bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
        }
        return ERR_PTR(-ENOENT);
 }
+
+long libbpf_get_error(const void *ptr)
+{
+       if (IS_ERR(ptr))
+               return PTR_ERR(ptr);
+       return 0;
+}
index a5a8b86a06fe0794623a3f9ac3fb3bf719553ba9..b30394f9947a35356af870223664a4409f8e5fd6 100644 (file)
@@ -22,8 +22,8 @@
 #define __BPF_LIBBPF_H
 
 #include <stdio.h>
+#include <stdint.h>
 #include <stdbool.h>
-#include <linux/err.h>
 #include <sys/types.h>  // for size_t
 
 enum libbpf_errno {
@@ -65,6 +65,7 @@ struct bpf_object *bpf_object__open(const char *path);
 struct bpf_object *bpf_object__open_buffer(void *obj_buf,
                                           size_t obj_buf_sz,
                                           const char *name);
+int bpf_object__pin(struct bpf_object *object, const char *path);
 void bpf_object__close(struct bpf_object *object);
 
 /* Load/unload object into/from kernel */
@@ -106,6 +107,9 @@ void *bpf_program__priv(struct bpf_program *prog);
 const char *bpf_program__title(struct bpf_program *prog, bool needs_copy);
 
 int bpf_program__fd(struct bpf_program *prog);
+int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
+                             int instance);
+int bpf_program__pin(struct bpf_program *prog, const char *path);
 
 struct bpf_insn;
 
@@ -174,11 +178,21 @@ int bpf_program__nth_fd(struct bpf_program *prog, int n);
 /*
  * Adjust type of bpf program. Default is kprobe.
  */
+int bpf_program__set_socket_filter(struct bpf_program *prog);
 int bpf_program__set_tracepoint(struct bpf_program *prog);
 int bpf_program__set_kprobe(struct bpf_program *prog);
+int bpf_program__set_sched_cls(struct bpf_program *prog);
+int bpf_program__set_sched_act(struct bpf_program *prog);
+int bpf_program__set_xdp(struct bpf_program *prog);
+int bpf_program__set_perf_event(struct bpf_program *prog);
 
+bool bpf_program__is_socket_filter(struct bpf_program *prog);
 bool bpf_program__is_tracepoint(struct bpf_program *prog);
 bool bpf_program__is_kprobe(struct bpf_program *prog);
+bool bpf_program__is_sched_cls(struct bpf_program *prog);
+bool bpf_program__is_sched_act(struct bpf_program *prog);
+bool bpf_program__is_xdp(struct bpf_program *prog);
+bool bpf_program__is_perf_event(struct bpf_program *prog);
 
 /*
  * We don't need __attribute__((packed)) now since it is
@@ -223,5 +237,8 @@ typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *);
 int bpf_map__set_priv(struct bpf_map *map, void *priv,
                      bpf_map_clear_priv_t clear_priv);
 void *bpf_map__priv(struct bpf_map *map);
+int bpf_map__pin(struct bpf_map *map, const char *path);
+
+long libbpf_get_error(const void *ptr);
 
 #endif
index 3f8cc44a0dbdad3665ad7b1f03a58a6ce3f92707..3d1c3b5b51504d3512df5825baaaeec70d6f939b 100644 (file)
@@ -19,7 +19,13 @@ MAKEFLAGS += --no-print-directory
 LIBFILE = $(OUTPUT)libsubcmd.a
 
 CFLAGS := $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
-CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -O6 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fPIC
+CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fPIC
+
+ifeq ($(CC), clang)
+  CFLAGS += -O3
+else
+  CFLAGS += -O6
+endif
 
 # Treat warnings as errors unless directed not to
 ifneq ($(WERROR),0)
index 8aad81151d5068bacbc1e6d3c114248e9847bf84..6bc24025d05457098b504c3a1e9e5a1316b21817 100644 (file)
@@ -270,6 +270,8 @@ static int get_value(struct parse_opt_ctx_t *p,
                }
                if (get_arg(p, opt, flags, &arg))
                        return -1;
+               if (arg[0] == '-')
+                       return opterror(opt, "expects an unsigned numerical value", flags);
                *(unsigned int *)opt->value = strtol(arg, (char **)&s, 10);
                if (*s)
                        return opterror(opt, "expects a numerical value", flags);
@@ -302,6 +304,8 @@ static int get_value(struct parse_opt_ctx_t *p,
                }
                if (get_arg(p, opt, flags, &arg))
                        return -1;
+               if (arg[0] == '-')
+                       return opterror(opt, "expects an unsigned numerical value", flags);
                *(u64 *)opt->value = strtoull(arg, (char **)&s, 10);
                if (*s)
                        return opterror(opt, "expects a numerical value", flags);
index 11c3be3bcce79598bd758d436270399f0f240842..f054ca1b899df532612a9f11a317ce147b2319bf 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef __SUBCMD_PARSE_OPTIONS_H
 #define __SUBCMD_PARSE_OPTIONS_H
 
+#include <linux/kernel.h>
 #include <stdbool.h>
 #include <stdint.h>
 
@@ -132,32 +133,32 @@ struct option {
 #define OPT_UINTEGER(s, l, v, h)    { .type = OPTION_UINTEGER, .short_name = (s), .long_name = (l), .value = check_vtype(v, unsigned int *), .help = (h) }
 #define OPT_LONG(s, l, v, h)        { .type = OPTION_LONG, .short_name = (s), .long_name = (l), .value = check_vtype(v, long *), .help = (h) }
 #define OPT_U64(s, l, v, h)         { .type = OPTION_U64, .short_name = (s), .long_name = (l), .value = check_vtype(v, u64 *), .help = (h) }
-#define OPT_STRING(s, l, v, a, h)   { .type = OPTION_STRING,  .short_name = (s), .long_name = (l), .value = check_vtype(v, const char **), (a), .help = (h) }
+#define OPT_STRING(s, l, v, a, h)   { .type = OPTION_STRING,  .short_name = (s), .long_name = (l), .value = check_vtype(v, const char **), .argh = (a), .help = (h) }
 #define OPT_STRING_OPTARG(s, l, v, a, h, d) \
        { .type = OPTION_STRING,  .short_name = (s), .long_name = (l), \
-         .value = check_vtype(v, const char **), (a), .help = (h), \
+         .value = check_vtype(v, const char **), .argh =(a), .help = (h), \
          .flags = PARSE_OPT_OPTARG, .defval = (intptr_t)(d) }
 #define OPT_STRING_OPTARG_SET(s, l, v, os, a, h, d) \
        { .type = OPTION_STRING, .short_name = (s), .long_name = (l), \
-         .value = check_vtype(v, const char **), (a), .help = (h), \
+         .value = check_vtype(v, const char **), .argh = (a), .help = (h), \
          .flags = PARSE_OPT_OPTARG, .defval = (intptr_t)(d), \
          .set = check_vtype(os, bool *)}
-#define OPT_STRING_NOEMPTY(s, l, v, a, h)   { .type = OPTION_STRING,  .short_name = (s), .long_name = (l), .value = check_vtype(v, const char **), (a), .help = (h), .flags = PARSE_OPT_NOEMPTY}
+#define OPT_STRING_NOEMPTY(s, l, v, a, h)   { .type = OPTION_STRING,  .short_name = (s), .long_name = (l), .value = check_vtype(v, const char **), .argh = (a), .help = (h), .flags = PARSE_OPT_NOEMPTY}
 #define OPT_DATE(s, l, v, h) \
        { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = "time", .help = (h), .callback = parse_opt_approxidate_cb }
 #define OPT_CALLBACK(s, l, v, a, h, f) \
-       { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f) }
+       { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = (a), .help = (h), .callback = (f) }
 #define OPT_CALLBACK_NOOPT(s, l, v, a, h, f) \
-       { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f), .flags = PARSE_OPT_NOARG }
+       { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = (a), .help = (h), .callback = (f), .flags = PARSE_OPT_NOARG }
 #define OPT_CALLBACK_DEFAULT(s, l, v, a, h, f, d) \
-       { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f), .defval = (intptr_t)d, .flags = PARSE_OPT_LASTARG_DEFAULT }
+       { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = (a), .help = (h), .callback = (f), .defval = (intptr_t)d, .flags = PARSE_OPT_LASTARG_DEFAULT }
 #define OPT_CALLBACK_DEFAULT_NOOPT(s, l, v, a, h, f, d) \
        { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l),\
-       .value = (v), (a), .help = (h), .callback = (f), .defval = (intptr_t)d,\
+       .value = (v), .arg = (a), .help = (h), .callback = (f), .defval = (intptr_t)d,\
        .flags = PARSE_OPT_LASTARG_DEFAULT | PARSE_OPT_NOARG}
 #define OPT_CALLBACK_OPTARG(s, l, v, d, a, h, f) \
        { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), \
-         .value = (v), (a), .help = (h), .callback = (f), \
+         .value = (v), .argh = (a), .help = (h), .callback = (f), \
          .flags = PARSE_OPT_OPTARG, .data = (d) }
 
 /* parse_options() will filter out the processed options and leave the
index 2616c66e10c1ea0bf4016138bd2a6cdbc3650020..47076b15eebeaa5b54583761130b10ecef2fc0aa 100644 (file)
@@ -257,10 +257,16 @@ define do_install_plugins
 endef
 
 define do_generate_dynamic_list_file
-       (echo '{';                                                      \
-       $(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;       \
-       echo '};';                                                      \
-       ) > $2
+       symbol_type=`$(NM) -u -D $1 | awk 'NF>1 {print $$1}' | \
+       xargs echo "U W w" | tr ' ' '\n' | sort -u | xargs echo`;\
+       if [ "$$symbol_type" = "U W w" ];then                           \
+               (echo '{';                                              \
+               $(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;\
+               echo '};';                                              \
+               ) > $2;                                                 \
+       else                                                            \
+               (echo Either missing one of [$1] or bad version of $(NM)) 1>&2;\
+       fi
 endef
 
 install_lib: all_cmd install_plugins
index 65984f1c2974dd7d61d7af85729eed78ca7ddd46..c94e3641b046638d26164f29dba01cee67fa2f56 100644 (file)
@@ -315,6 +315,7 @@ static unsigned int old_update_pointers(struct kbuffer *kbuf)
                extend += delta;
                delta = extend;
                ptr += 4;
+               length = 0;
                break;
 
        case OLD_RINGBUF_TYPE_TIME_STAMP:
index a00ec190821aa352234ddf11f5d6ab3bb911dc5f..42dbf73758f3425f20c3401a5c64f024459ddfaf 100644 (file)
@@ -130,7 +130,7 @@ static int function_handler(struct trace_seq *s, struct pevent_record *record,
        unsigned long long pfunction;
        const char *func;
        const char *parent;
-       int index;
+       int index = 0;
 
        if (pevent_get_field_val(s, event, "ip", record, &function, 1))
                return trace_seq_putc(s, '!');
index 5e0dea2cdc01f65849f49f10392293a21b3a468d..039636ffb6c8a3edb6c14fd9a2b3a854ab84f982 100644 (file)
@@ -150,9 +150,9 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
                *type = INSN_RETURN;
                break;
 
-       case 0xc5: /* iret */
        case 0xca: /* retf */
        case 0xcb: /* retf */
+       case 0xcf: /* iret */
                *type = INSN_CONTEXT_SWITCH;
                break;
 
index b12d5d1666e3c1502c66308e9c535679a3189b29..9b79f8d7db50e3dafcbe200ad8f3b47230ead69b 100644 (file)
@@ -3,10 +3,12 @@ perf-y += builtin-annotate.o
 perf-y += builtin-config.o
 perf-y += builtin-diff.o
 perf-y += builtin-evlist.o
+perf-y += builtin-ftrace.o
 perf-y += builtin-help.o
 perf-y += builtin-sched.o
 perf-y += builtin-buildid-list.o
 perf-y += builtin-buildid-cache.o
+perf-y += builtin-kallsyms.o
 perf-y += builtin-list.o
 perf-y += builtin-record.o
 perf-y += builtin-report.o
@@ -39,8 +41,7 @@ CFLAGS_builtin-help.o      += $(paths)
 CFLAGS_builtin-timechart.o += $(paths)
 CFLAGS_perf.o              += -DPERF_HTML_PATH="BUILD_STR($(htmldir_SQ))"      \
                              -DPERF_EXEC_PATH="BUILD_STR($(perfexecdir_SQ))"   \
-                             -DPREFIX="BUILD_STR($(prefix_SQ))"                \
-                             -include $(OUTPUT)PERF-VERSION-FILE
+                             -DPREFIX="BUILD_STR($(prefix_SQ))"
 CFLAGS_builtin-trace.o    += -DSTRACE_GROUPS_DIR="BUILD_STR($(STRACE_GROUPS_DIR_SQ))"
 CFLAGS_builtin-report.o           += -DTIPDIR="BUILD_STR($(tipdir_SQ))"
 CFLAGS_builtin-report.o           += -DDOCDIR="BUILD_STR($(srcdir_SQ)/Documentation)"
index 3f06730c7f47cbed8118be7664666a0d90db42ca..2da07e51e1190271dd0d74130baf4807b0b08031 100644 (file)
@@ -248,7 +248,7 @@ output fields set for caheline offsets output:
              Code address, Code symbol, Shared Object, Source line
   dso   - coalesced by shared object
 
-By default the coalescing is setup with 'pid,tid,iaddr'.
+By default the coalescing is setup with 'pid,iaddr'.
 
 STDIO OUTPUT
 ------------
index 9365b75fd04fdfe823eaa8abf29e6cb25fcb05bb..5b4fff3adc4be4ca66a30a9e8b85b55e1a472f4a 100644 (file)
@@ -498,6 +498,18 @@ record.*::
                But if this option is 'no-cache', it will not update the build-id cache.
                'skip' skips post-processing and does not update the cache.
 
+diff.*::
+       diff.order::
+               This option sets the number of columns to sort the result.
+               The default is 0, which means sorting by baseline.
+               Setting it to 1 will sort the result by delta (or other
+               compute method selected).
+
+       diff.compute::
+               This options sets the method for computing the diff result.
+               Possible values are 'delta', 'delta-abs', 'ratio' and
+               'wdiff'.  Default is 'delta'.
+
 SEE ALSO
 --------
 linkperf:perf[1]
index 3e9490b9c5334486d9787d52e531e1002c917c47..66dbe3dee74bcaae5b523bf3f80741b8608c87e2 100644 (file)
@@ -86,8 +86,9 @@ OPTIONS
 
 -c::
 --compute::
-        Differential computation selection - delta,ratio,wdiff (default is delta).
-        See COMPARISON METHODS section for more info.
+        Differential computation selection - delta, ratio, wdiff, delta-abs
+        (default is delta-abs).  Default can be changed using diff.compute
+        config option.  See COMPARISON METHODS section for more info.
 
 -p::
 --period::
@@ -99,7 +100,11 @@ OPTIONS
 
 -o::
 --order::
-       Specify compute sorting column number.
+       Specify compute sorting column number.  0 means sorting by baseline
+       overhead and 1 (default) means sorting by computed value of column 1
+       (data from the first file other than the baseline).  Values more than 1
+       can be used only if enough data files are provided.
+       The default value can be set using the diff.order config option.
 
 --percentage::
        Determine how to display the overhead percentage of filtered entries.
@@ -181,6 +186,10 @@ with:
     relative to how entries are filtered.  Use --percentage=absolute to
     prevent such fluctuation.
 
+delta-abs
+~~~~~~~~~
+Same as 'delta' method, but sort the result with the absolute values.
+
 ratio
 ~~~~~
 If specified the 'Ratio' column is displayed with value 'r' computed as:
diff --git a/tools/perf/Documentation/perf-ftrace.txt b/tools/perf/Documentation/perf-ftrace.txt
new file mode 100644 (file)
index 0000000..2d96de6
--- /dev/null
@@ -0,0 +1,36 @@
+perf-ftrace(1)
+==============
+
+NAME
+----
+perf-ftrace - simple wrapper for kernel's ftrace functionality
+
+
+SYNOPSIS
+--------
+[verse]
+'perf ftrace' <command>
+
+DESCRIPTION
+-----------
+The 'perf ftrace' command is a simple wrapper of kernel's ftrace
+functionality.  It only supports single thread tracing currently and
+just reads trace_pipe in text and then writes it to stdout.
+
+The following options apply to perf ftrace.
+
+OPTIONS
+-------
+
+-t::
+--tracer=::
+       Tracer to use: function_graph or function.
+
+-v::
+--verbose=::
+        Verbosity level.
+
+
+SEE ALSO
+--------
+linkperf:perf-record[1], linkperf:perf-trace[1]
diff --git a/tools/perf/Documentation/perf-kallsyms.txt b/tools/perf/Documentation/perf-kallsyms.txt
new file mode 100644 (file)
index 0000000..954ea9e
--- /dev/null
@@ -0,0 +1,24 @@
+perf-kallsyms(1)
+================
+
+NAME
+----
+perf-kallsyms - Searches running kernel for symbols
+
+SYNOPSIS
+--------
+[verse]
+'perf kallsyms <options> symbol_name[,symbol_name...]'
+
+DESCRIPTION
+-----------
+This command searches the running kernel kallsyms file for the given symbol(s)
+and prints information about it, including the DSO, the kallsyms begin/end
+addresses and the addresses in the ELF kallsyms symbol table (for symbols in
+modules).
+
+OPTIONS
+-------
+-v::
+--verbose=::
+       Increase verbosity level, showing details about symbol table loading, etc.
index 5054d9147f0f03122f96338f96e9acd08cc36992..27256bc68eda0268fc79d9d9eca06bb349a8212e 100644 (file)
@@ -421,9 +421,19 @@ Configure all used events to run in user space.
 --timestamp-filename
 Append timestamp to output file name.
 
---switch-output::
+--switch-output[=mode]::
 Generate multiple perf.data files, timestamp prefixed, switching to a new one
-when receiving a SIGUSR2.
+based on 'mode' value:
+  "signal" - when receiving a SIGUSR2 (default value) or
+  <size>   - when reaching the size threshold, size is expected to
+             be a number with appended unit character - B/K/M/G
+  <time>   - when reaching the time threshold, size is expected to
+             be a number with appended unit character - s/m/h/d
+
+             Note: the precision of  the size  threshold  hugely depends
+             on your configuration  - the number and size of  your  ring
+             buffers (-m). It is generally more precise for higher sizes
+             (like >5M), for lower values expect different sizes.
 
 A possible use case is to, given an external event, slice the perf.data file
 that gets then processed, possibly via a perf script, to decide if that
index 76173969ab80375c468e2722ec5e2773dab96a7c..d33deddb0146cc9500f29fe6f0b1e65d6f237b10 100644 (file)
@@ -143,6 +143,8 @@ OPTIONS for 'perf sched timehist'
        stop time is not given (i.e, time string is 'x.y,') then analysis goes
        to end of file.
 
+--state::
+       Show task state when it switched out.
 
 SEE ALSO
 --------
index 5dc5c6a09ac4f4113fd88f56f16c8713ff2e9646..4ed5f239ba7dee9fe2fc44854e3b8daa42091a97 100644 (file)
@@ -36,7 +36,7 @@ There are several variants of perf script:
 
   'perf script report <script> [args]' to run and display the results
   of <script>.  <script> is the name displayed in the output of 'perf
-  trace --list' i.e. the actual script name minus any language
+  script --list' i.e. the actual script name minus any language
   extension.  The perf.data output from a previous run of 'perf script
   record <script>' is used and should be present for this command to
   succeed.  [args] refers to the (mainly optional) args expected by
@@ -76,7 +76,7 @@ OPTIONS
        Any command you can specify in a shell.
 
 -D::
---dump-raw-script=::
+--dump-raw-trace=::
         Display verbose dump of the trace data.
 
 -L::
index 781b019751a4cb8bafeb70afc467aca54c410177..afd728672b6fb488415ca4c58043baaf6b264226 100644 (file)
@@ -35,7 +35,10 @@ OPTIONS
 
 -e::
 --expr::
-       List of syscalls to show, currently only syscall names.
+--event::
+       List of syscalls and other perf events (tracepoints, HW cache events,
+       etc) to show.
+       See 'perf list' for a complete list of events.
        Prefixing with ! shows all syscalls but the ones specified.  You may
        need to escape it.
 
@@ -135,9 +138,6 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
 --kernel-syscall-graph::
         Show the kernel callchains on the syscall exit path.
 
---event::
-       Trace other events, see 'perf list' for a complete list.
-
 --max-stack::
         Set the stack depth limit when parsing the callchain, anything
         beyond the specified depth will be ignored. Note that at this point
index a511e5f31e36185a7e9fd8cb83eed23371cc9002..8672f835ae4eff2659ff314c40bf77d3730b7986 100644 (file)
@@ -61,6 +61,7 @@ tools/include/asm-generic/bitops.h
 tools/include/linux/atomic.h
 tools/include/linux/bitops.h
 tools/include/linux/compiler.h
+tools/include/linux/compiler-gcc.h
 tools/include/linux/coresight-pmu.h
 tools/include/linux/filter.h
 tools/include/linux/hash.h
index 76c84f0eec52020963d41d047da83e4cea487070..2b941efadb04e34779f00c4b6348262da0fb3dd6 100644 (file)
@@ -144,8 +144,12 @@ ifndef DEBUG
 endif
 
 ifeq ($(DEBUG),0)
+ifeq ($(CC), clang)
+  CFLAGS += -O3
+else
   CFLAGS += -O6
 endif
+endif
 
 ifdef PARSER_DEBUG
   PARSER_DEBUG_BISON := -t
@@ -291,8 +295,10 @@ else
       endif
     endif
     ifneq ($(feature-dwarf), 1)
-      msg := $(warning No libdw.h found or old libdw.h found or elfutils is older than 0.138, disables dwarf support. Please install new elfutils-devel/libdw-dev);
-      NO_DWARF := 1
+      ifndef NO_DWARF
+        msg := $(warning No libdw.h found or old libdw.h found or elfutils is older than 0.138, disables dwarf support. Please install new elfutils-devel/libdw-dev);
+        NO_DWARF := 1
+      endif
     else
       ifneq ($(feature-dwarf_getlocations), 1)
         msg := $(warning Old libdw.h, finding variables at given 'perf probe' point will not work, install elfutils-devel/libdw-dev >= 0.157);
index 8bb16aa9d661d0e5b8104adc0ef3d14a1fc296f9..4da19b6ba94acd03c9764733f84d089def765dbf 100644 (file)
@@ -661,6 +661,7 @@ ifndef NO_PERF_READ_VDSOX32
 endif
 ifndef NO_JVMTI
        $(call QUIET_INSTALL, $(LIBJVMTI)) \
+               $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(libdir_SQ)'; \
                $(INSTALL) $(OUTPUT)$(LIBJVMTI) '$(DESTDIR_SQ)$(libdir_SQ)';
 endif
        $(call QUIET_INSTALL, libexec) \
index 18b13518d8d8701137d489a65f7dd752bdc7f43f..eebe1ec9d2ee70e7ab58418d976744ff45bb30e0 100644 (file)
@@ -2,3 +2,4 @@ ifndef NO_DWARF
 PERF_HAVE_DWARF_REGS := 1
 endif
 PERF_HAVE_JITDUMP := 1
+PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
index 26759363f9217257982c6dcf874e16ec3d816eb4..36e375f5a211f262a956617d512ebf00afa6f054 100644 (file)
@@ -2,12 +2,12 @@
 /* This is included in perf/util/dwarf-regs.c */
 
 static const char * const aarch64_regstr_tbl[] = {
-       "%r0", "%r1", "%r2", "%r3", "%r4",
-       "%r5", "%r6", "%r7", "%r8", "%r9",
-       "%r10", "%r11", "%r12", "%r13", "%r14",
-       "%r15", "%r16", "%r17", "%r18", "%r19",
-       "%r20", "%r21", "%r22", "%r23", "%r24",
-       "%r25", "%r26", "%r27", "%r28", "%r29",
+       "%x0", "%x1", "%x2", "%x3", "%x4",
+       "%x5", "%x6", "%x7", "%x8", "%x9",
+       "%x10", "%x11", "%x12", "%x13", "%x14",
+       "%x15", "%x16", "%x17", "%x18", "%x19",
+       "%x20", "%x21", "%x22", "%x23", "%x24",
+       "%x25", "%x26", "%x27", "%x28", "%x29",
        "%lr", "%sp",
 };
 #endif
index d49efeb8172eeaa2f31bf8f6dae026fffc90e75f..068b6189157b6eb9a11c58f7cbca1f4ca1348efb 100644 (file)
 
 #include <stddef.h>
 #include <dwarf-regs.h>
+#include <linux/ptrace.h> /* for struct user_pt_regs */
+#include "util.h"
 
 struct pt_regs_dwarfnum {
        const char *name;
        unsigned int dwarfnum;
 };
 
-#define STR(s) #s
 #define REG_DWARFNUM_NAME(r, num) {.name = r, .dwarfnum = num}
 #define GPR_DWARFNUM_NAME(num) \
        {.name = STR(%x##num), .dwarfnum = num}
 #define REG_DWARFNUM_END {.name = NULL, .dwarfnum = 0}
+#define DWARFNUM2OFFSET(index) \
+       (index * sizeof((struct user_pt_regs *)0)->regs[0])
 
 /*
  * Reference:
@@ -78,3 +81,13 @@ const char *get_arch_regstr(unsigned int n)
                        return roff->name;
        return NULL;
 }
+
+int regs_query_register_offset(const char *name)
+{
+       const struct pt_regs_dwarfnum *roff;
+
+       for (roff = regdwarfnum_table; roff->name != NULL; roff++)
+               if (!strcmp(roff->name, name))
+                       return DWARFNUM2OFFSET(roff->dwarfnum);
+       return -EINVAL;
+}
index bfbb6b5f609cd0427bf3f7531143771e2c4f4248..da04b8c5568a39cf509b9aa5df844641866bcfd1 100644 (file)
@@ -130,8 +130,6 @@ int bench_futex_hash(int argc, const char **argv,
        }
 
        ncpus = sysconf(_SC_NPROCESSORS_ONLN);
-       nsecs = futexbench_sanitize_numeric(nsecs);
-       nfutexes = futexbench_sanitize_numeric(nfutexes);
 
        sigfillset(&act.sa_mask);
        act.sa_sigaction = toggle_done;
@@ -139,8 +137,6 @@ int bench_futex_hash(int argc, const char **argv,
 
        if (!nthreads) /* default to the number of CPUs */
                nthreads = ncpus;
-       else
-               nthreads = futexbench_sanitize_numeric(nthreads);
 
        worker = calloc(nthreads, sizeof(*worker));
        if (!worker)
index 6d9d6c40a9164442ce2d0c8a9cdd8831cd4a6297..91877777ec6e3a4052e49eca9cba4793fa551f73 100644 (file)
@@ -152,7 +152,6 @@ int bench_futex_lock_pi(int argc, const char **argv,
                goto err;
 
        ncpus = sysconf(_SC_NPROCESSORS_ONLN);
-       nsecs = futexbench_sanitize_numeric(nsecs);
 
        sigfillset(&act.sa_mask);
        act.sa_sigaction = toggle_done;
@@ -160,8 +159,6 @@ int bench_futex_lock_pi(int argc, const char **argv,
 
        if (!nthreads)
                nthreads = ncpus;
-       else
-               nthreads = futexbench_sanitize_numeric(nthreads);
 
        worker = calloc(nthreads, sizeof(*worker));
        if (!worker)
index fd4ee95b689a11c4646c33fdd07a1c202cc50958..2b9705a8734cd6005fd3149943e2ff20eecf4bbe 100644 (file)
@@ -128,8 +128,6 @@ int bench_futex_requeue(int argc, const char **argv,
 
        if (!nthreads)
                nthreads = ncpus;
-       else
-               nthreads = futexbench_sanitize_numeric(nthreads);
 
        worker = calloc(nthreads, sizeof(*worker));
        if (!worker)
index beaa6c142477b5d500c9c83d12dadc242420e497..2c8fa67ad53767e43c7e91dc84279528268fcb3a 100644 (file)
@@ -217,12 +217,8 @@ int bench_futex_wake_parallel(int argc, const char **argv,
        sigaction(SIGINT, &act, NULL);
 
        ncpus = sysconf(_SC_NPROCESSORS_ONLN);
-       nwaking_threads = futexbench_sanitize_numeric(nwaking_threads);
-
        if (!nblocked_threads)
                nblocked_threads = ncpus;
-       else
-               nblocked_threads = futexbench_sanitize_numeric(nblocked_threads);
 
        /* some sanity checks */
        if (nwaking_threads > nblocked_threads || !nwaking_threads)
index 46efcb98b5a4be7041678712ed29d8c69ca3ab3c..e246b1b8388a3fd3bfc7c9c398a90efc26d86c79 100644 (file)
@@ -129,7 +129,6 @@ int bench_futex_wake(int argc, const char **argv,
        }
 
        ncpus = sysconf(_SC_NPROCESSORS_ONLN);
-       nwakes = futexbench_sanitize_numeric(nwakes);
 
        sigfillset(&act.sa_mask);
        act.sa_sigaction = toggle_done;
@@ -137,8 +136,6 @@ int bench_futex_wake(int argc, const char **argv,
 
        if (!nthreads)
                nthreads = ncpus;
-       else
-               nthreads = futexbench_sanitize_numeric(nthreads);
 
        worker = calloc(nthreads, sizeof(*worker));
        if (!worker)
index ba7c735c0c62db090a2c1fddd9aec54876496cfe..b2e06d1190d0766694c97f7f0b808717442af604 100644 (file)
@@ -7,7 +7,6 @@
 #ifndef _FUTEX_H
 #define _FUTEX_H
 
-#include <stdlib.h>
 #include <unistd.h>
 #include <sys/syscall.h>
 #include <sys/types.h>
@@ -100,7 +99,4 @@ static inline int pthread_attr_setaffinity_np(pthread_attr_t *attr,
 }
 #endif
 
-/* User input sanitation */
-#define futexbench_sanitize_numeric(__n) abs((__n))
-
 #endif /* _FUTEX_H */
index 8efe904e486bf98b63e24b7904652f10ca5ec7e9..3083fc36282b564d3fc5da05864907c3ac222cf4 100644 (file)
@@ -43,6 +43,7 @@
 /*
  * Debug printf:
  */
+#undef dprintf
 #define dprintf(x...) do { if (g && g->p.show_details >= 1) printf(x); } while (0)
 
 struct thread_data {
@@ -1573,13 +1574,13 @@ static int __bench_numa(const char *name)
                "GB/sec,", "total-speed",       "GB/sec total speed");
 
        if (g->p.show_details >= 2) {
-               char tname[32];
+               char tname[14 + 2 * 10 + 1];
                struct thread_data *td;
                for (p = 0; p < g->p.nr_proc; p++) {
                        for (t = 0; t < g->p.nr_threads; t++) {
-                               memset(tname, 0, 32);
+                               memset(tname, 0, sizeof(tname));
                                td = g->threads + p*g->p.nr_threads + t;
-                               snprintf(tname, 32, "process%d:thread%d", p, t);
+                               snprintf(tname, sizeof(tname), "process%d:thread%d", p, t);
                                print_res(tname, td->speed_gbs,
                                        "GB/sec",       "thread-speed", "GB/sec/thread speed");
                                print_res(tname, td->system_time_ns / NSEC_PER_SEC,
index f8ca7a4ebabcf2f9b805283ddf4df82804316bee..e2b21723bbf8acddac596ebf519ba5b7f82c84b3 100644 (file)
@@ -58,7 +58,7 @@ struct c2c_hist_entry {
        struct hist_entry       he;
 };
 
-static char const *coalesce_default = "pid,tid,iaddr";
+static char const *coalesce_default = "pid,iaddr";
 
 struct perf_c2c {
        struct perf_tool        tool;
@@ -2476,6 +2476,7 @@ static int build_cl_output(char *cl_sort, bool no_source)
                "mean_rmt,"
                "mean_lcl,"
                "mean_load,"
+               "tot_recs,"
                "cpucnt,",
                add_sym ? "symbol," : "",
                add_dso ? "dso," : "",
index 9ff0db4e2d0cd1bcc35d2ea137871093ebfcb405..70a2893475912e3eee09f5b8787136329dec2116 100644 (file)
@@ -17,6 +17,7 @@
 #include "util/symbol.h"
 #include "util/util.h"
 #include "util/data.h"
+#include "util/config.h"
 
 #include <stdlib.h>
 #include <math.h>
@@ -30,6 +31,7 @@ enum {
        PERF_HPP_DIFF__RATIO,
        PERF_HPP_DIFF__WEIGHTED_DIFF,
        PERF_HPP_DIFF__FORMULA,
+       PERF_HPP_DIFF__DELTA_ABS,
 
        PERF_HPP_DIFF__MAX_INDEX
 };
@@ -64,7 +66,7 @@ static bool force;
 static bool show_period;
 static bool show_formula;
 static bool show_baseline_only;
-static unsigned int sort_compute;
+static unsigned int sort_compute = 1;
 
 static s64 compute_wdiff_w1;
 static s64 compute_wdiff_w2;
@@ -73,19 +75,22 @@ enum {
        COMPUTE_DELTA,
        COMPUTE_RATIO,
        COMPUTE_WEIGHTED_DIFF,
+       COMPUTE_DELTA_ABS,
        COMPUTE_MAX,
 };
 
 const char *compute_names[COMPUTE_MAX] = {
        [COMPUTE_DELTA] = "delta",
+       [COMPUTE_DELTA_ABS] = "delta-abs",
        [COMPUTE_RATIO] = "ratio",
        [COMPUTE_WEIGHTED_DIFF] = "wdiff",
 };
 
-static int compute;
+static int compute = COMPUTE_DELTA_ABS;
 
 static int compute_2_hpp[COMPUTE_MAX] = {
        [COMPUTE_DELTA]         = PERF_HPP_DIFF__DELTA,
+       [COMPUTE_DELTA_ABS]     = PERF_HPP_DIFF__DELTA_ABS,
        [COMPUTE_RATIO]         = PERF_HPP_DIFF__RATIO,
        [COMPUTE_WEIGHTED_DIFF] = PERF_HPP_DIFF__WEIGHTED_DIFF,
 };
@@ -111,6 +116,10 @@ static struct header_column {
                .name  = "Delta",
                .width = 7,
        },
+       [PERF_HPP_DIFF__DELTA_ABS] = {
+               .name  = "Delta Abs",
+               .width = 7,
+       },
        [PERF_HPP_DIFF__RATIO] = {
                .name  = "Ratio",
                .width = 14,
@@ -298,6 +307,7 @@ static int formula_fprintf(struct hist_entry *he, struct hist_entry *pair,
 {
        switch (compute) {
        case COMPUTE_DELTA:
+       case COMPUTE_DELTA_ABS:
                return formula_delta(he, pair, buf, size);
        case COMPUTE_RATIO:
                return formula_ratio(he, pair, buf, size);
@@ -461,6 +471,7 @@ static void hists__precompute(struct hists *hists)
 
                        switch (compute) {
                        case COMPUTE_DELTA:
+                       case COMPUTE_DELTA_ABS:
                                compute_delta(he, pair);
                                break;
                        case COMPUTE_RATIO:
@@ -498,6 +509,13 @@ __hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
 
                return cmp_doubles(l, r);
        }
+       case COMPUTE_DELTA_ABS:
+       {
+               double l = fabs(left->diff.period_ratio_delta);
+               double r = fabs(right->diff.period_ratio_delta);
+
+               return cmp_doubles(l, r);
+       }
        case COMPUTE_RATIO:
        {
                double l = left->diff.period_ratio;
@@ -564,7 +582,7 @@ hist_entry__cmp_compute_idx(struct hist_entry *left, struct hist_entry *right,
        if (!p_left || !p_right)
                return p_left ? -1 : 1;
 
-       if (c != COMPUTE_DELTA) {
+       if (c != COMPUTE_DELTA && c != COMPUTE_DELTA_ABS) {
                /*
                 * The delta can be computed without the baseline, but
                 * others are not.  Put those entries which have no
@@ -606,6 +624,15 @@ hist_entry__cmp_delta(struct perf_hpp_fmt *fmt,
        return hist_entry__cmp_compute(right, left, COMPUTE_DELTA, d->idx);
 }
 
+static int64_t
+hist_entry__cmp_delta_abs(struct perf_hpp_fmt *fmt,
+                     struct hist_entry *left, struct hist_entry *right)
+{
+       struct data__file *d = fmt_to_data_file(fmt);
+
+       return hist_entry__cmp_compute(right, left, COMPUTE_DELTA_ABS, d->idx);
+}
+
 static int64_t
 hist_entry__cmp_ratio(struct perf_hpp_fmt *fmt,
                      struct hist_entry *left, struct hist_entry *right)
@@ -632,6 +659,14 @@ hist_entry__cmp_delta_idx(struct perf_hpp_fmt *fmt __maybe_unused,
                                           sort_compute);
 }
 
+static int64_t
+hist_entry__cmp_delta_abs_idx(struct perf_hpp_fmt *fmt __maybe_unused,
+                             struct hist_entry *left, struct hist_entry *right)
+{
+       return hist_entry__cmp_compute_idx(right, left, COMPUTE_DELTA_ABS,
+                                          sort_compute);
+}
+
 static int64_t
 hist_entry__cmp_ratio_idx(struct perf_hpp_fmt *fmt __maybe_unused,
                          struct hist_entry *left, struct hist_entry *right)
@@ -775,7 +810,7 @@ static const struct option options[] = {
        OPT_BOOLEAN('b', "baseline-only", &show_baseline_only,
                    "Show only items with match in baseline"),
        OPT_CALLBACK('c', "compute", &compute,
-                    "delta,ratio,wdiff:w1,w2 (default delta)",
+                    "delta,delta-abs,ratio,wdiff:w1,w2 (default delta-abs)",
                     "Entries differential computation selection",
                     setup_compute),
        OPT_BOOLEAN('p', "period", &show_period,
@@ -945,6 +980,7 @@ hpp__entry_pair(struct hist_entry *he, struct hist_entry *pair,
 
        switch (idx) {
        case PERF_HPP_DIFF__DELTA:
+       case PERF_HPP_DIFF__DELTA_ABS:
                if (pair->diff.computed)
                        diff = pair->diff.period_ratio_delta;
                else
@@ -1118,6 +1154,10 @@ static void data__hpp_register(struct data__file *d, int idx)
                fmt->color = hpp__color_wdiff;
                fmt->sort  = hist_entry__cmp_wdiff;
                break;
+       case PERF_HPP_DIFF__DELTA_ABS:
+               fmt->color = hpp__color_delta;
+               fmt->sort  = hist_entry__cmp_delta_abs;
+               break;
        default:
                fmt->sort  = hist_entry__cmp_nop;
                break;
@@ -1195,11 +1235,14 @@ static int ui_init(void)
        case COMPUTE_WEIGHTED_DIFF:
                fmt->sort = hist_entry__cmp_wdiff_idx;
                break;
+       case COMPUTE_DELTA_ABS:
+               fmt->sort = hist_entry__cmp_delta_abs_idx;
+               break;
        default:
                BUG_ON(1);
        }
 
-       perf_hpp__register_sort_field(fmt);
+       perf_hpp__prepend_sort_field(fmt);
        return 0;
 }
 
@@ -1249,6 +1292,31 @@ static int data_init(int argc, const char **argv)
        return 0;
 }
 
+static int diff__config(const char *var, const char *value,
+                       void *cb __maybe_unused)
+{
+       if (!strcmp(var, "diff.order")) {
+               sort_compute = perf_config_int(var, value);
+               return 0;
+       }
+       if (!strcmp(var, "diff.compute")) {
+               if (!strcmp(value, "delta")) {
+                       compute = COMPUTE_DELTA;
+               } else if (!strcmp(value, "delta-abs")) {
+                       compute = COMPUTE_DELTA_ABS;
+               } else if (!strcmp(value, "ratio")) {
+                       compute = COMPUTE_RATIO;
+               } else if (!strcmp(value, "wdiff")) {
+                       compute = COMPUTE_WEIGHTED_DIFF;
+               } else {
+                       pr_err("Invalid compute method: %s\n", value);
+                       return -1;
+               }
+       }
+
+       return 0;
+}
+
 int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused)
 {
        int ret = hists__init();
@@ -1256,6 +1324,8 @@ int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused)
        if (ret < 0)
                return ret;
 
+       perf_config(diff__config, NULL);
+
        argc = parse_options(argc, argv, options, diff_usage, 0);
 
        if (symbol__init(NULL) < 0)
diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
new file mode 100644 (file)
index 0000000..c3e6436
--- /dev/null
@@ -0,0 +1,265 @@
+/*
+ * builtin-ftrace.c
+ *
+ * Copyright (c) 2013  LG Electronics,  Namhyung Kim <namhyung@kernel.org>
+ *
+ * Released under the GPL v2.
+ */
+
+#include "builtin.h"
+#include "perf.h"
+
+#include <unistd.h>
+#include <signal.h>
+
+#include "debug.h"
+#include <subcmd/parse-options.h>
+#include "evlist.h"
+#include "target.h"
+#include "thread_map.h"
+#include "util/config.h"
+
+
+#define DEFAULT_TRACER  "function_graph"
+
+struct perf_ftrace {
+       struct perf_evlist *evlist;
+       struct target target;
+       const char *tracer;
+};
+
+static bool done;
+
+static void sig_handler(int sig __maybe_unused)
+{
+       done = true;
+}
+
+/*
+ * perf_evlist__prepare_workload will send a SIGUSR1 if the fork fails, since
+ * we asked by setting its exec_error to the function below,
+ * ftrace__workload_exec_failed_signal.
+ *
+ * XXX We need to handle this more appropriately, emitting an error, etc.
+ */
+static void ftrace__workload_exec_failed_signal(int signo __maybe_unused,
+                                               siginfo_t *info __maybe_unused,
+                                               void *ucontext __maybe_unused)
+{
+       /* workload_exec_errno = info->si_value.sival_int; */
+       done = true;
+}
+
+static int write_tracing_file(const char *name, const char *val)
+{
+       char *file;
+       int fd, ret = -1;
+       ssize_t size = strlen(val);
+
+       file = get_tracing_file(name);
+       if (!file) {
+               pr_debug("cannot get tracing file: %s\n", name);
+               return -1;
+       }
+
+       fd = open(file, O_WRONLY);
+       if (fd < 0) {
+               pr_debug("cannot open tracing file: %s\n", name);
+               goto out;
+       }
+
+       if (write(fd, val, size) == size)
+               ret = 0;
+       else
+               pr_debug("write '%s' to tracing/%s failed\n", val, name);
+
+       close(fd);
+out:
+       put_tracing_file(file);
+       return ret;
+}
+
+static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused)
+{
+       if (write_tracing_file("tracing_on", "0") < 0)
+               return -1;
+
+       if (write_tracing_file("current_tracer", "nop") < 0)
+               return -1;
+
+       if (write_tracing_file("set_ftrace_pid", " ") < 0)
+               return -1;
+
+       return 0;
+}
+
+static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
+{
+       char *trace_file;
+       int trace_fd;
+       char *trace_pid;
+       char buf[4096];
+       struct pollfd pollfd = {
+               .events = POLLIN,
+       };
+
+       if (geteuid() != 0) {
+               pr_err("ftrace only works for root!\n");
+               return -1;
+       }
+
+       if (argc < 1)
+               return -1;
+
+       signal(SIGINT, sig_handler);
+       signal(SIGUSR1, sig_handler);
+       signal(SIGCHLD, sig_handler);
+
+       reset_tracing_files(ftrace);
+
+       /* reset ftrace buffer */
+       if (write_tracing_file("trace", "0") < 0)
+               goto out;
+
+       if (perf_evlist__prepare_workload(ftrace->evlist, &ftrace->target,
+                                         argv, false, ftrace__workload_exec_failed_signal) < 0)
+               goto out;
+
+       if (write_tracing_file("current_tracer", ftrace->tracer) < 0) {
+               pr_err("failed to set current_tracer to %s\n", ftrace->tracer);
+               goto out;
+       }
+
+       if (asprintf(&trace_pid, "%d", thread_map__pid(ftrace->evlist->threads, 0)) < 0) {
+               pr_err("failed to allocate pid string\n");
+               goto out;
+       }
+
+       if (write_tracing_file("set_ftrace_pid", trace_pid) < 0) {
+               pr_err("failed to set pid: %s\n", trace_pid);
+               goto out_free_pid;
+       }
+
+       trace_file = get_tracing_file("trace_pipe");
+       if (!trace_file) {
+               pr_err("failed to open trace_pipe\n");
+               goto out_free_pid;
+       }
+
+       trace_fd = open(trace_file, O_RDONLY);
+
+       put_tracing_file(trace_file);
+
+       if (trace_fd < 0) {
+               pr_err("failed to open trace_pipe\n");
+               goto out_free_pid;
+       }
+
+       fcntl(trace_fd, F_SETFL, O_NONBLOCK);
+       pollfd.fd = trace_fd;
+
+       if (write_tracing_file("tracing_on", "1") < 0) {
+               pr_err("can't enable tracing\n");
+               goto out_close_fd;
+       }
+
+       perf_evlist__start_workload(ftrace->evlist);
+
+       while (!done) {
+               if (poll(&pollfd, 1, -1) < 0)
+                       break;
+
+               if (pollfd.revents & POLLIN) {
+                       int n = read(trace_fd, buf, sizeof(buf));
+                       if (n < 0)
+                               break;
+                       if (fwrite(buf, n, 1, stdout) != 1)
+                               break;
+               }
+       }
+
+       write_tracing_file("tracing_on", "0");
+
+       /* read remaining buffer contents */
+       while (true) {
+               int n = read(trace_fd, buf, sizeof(buf));
+               if (n <= 0)
+                       break;
+               if (fwrite(buf, n, 1, stdout) != 1)
+                       break;
+       }
+
+out_close_fd:
+       close(trace_fd);
+out_free_pid:
+       free(trace_pid);
+out:
+       reset_tracing_files(ftrace);
+
+       return done ? 0 : -1;
+}
+
+static int perf_ftrace_config(const char *var, const char *value, void *cb)
+{
+       struct perf_ftrace *ftrace = cb;
+
+       if (prefixcmp(var, "ftrace."))
+               return 0;
+
+       if (strcmp(var, "ftrace.tracer"))
+               return -1;
+
+       if (!strcmp(value, "function_graph") ||
+           !strcmp(value, "function")) {
+               ftrace->tracer = value;
+               return 0;
+       }
+
+       pr_err("Please select \"function_graph\" (default) or \"function\"\n");
+       return -1;
+}
+
+int cmd_ftrace(int argc, const char **argv, const char *prefix __maybe_unused)
+{
+       int ret;
+       struct perf_ftrace ftrace = {
+               .tracer = DEFAULT_TRACER,
+               .target = { .uid = UINT_MAX, },
+       };
+       const char * const ftrace_usage[] = {
+               "perf ftrace [<options>] <command>",
+               "perf ftrace [<options>] -- <command> [<options>]",
+               NULL
+       };
+       const struct option ftrace_options[] = {
+       OPT_STRING('t', "tracer", &ftrace.tracer, "tracer",
+                  "tracer to use: function_graph(default) or function"),
+       OPT_INCR('v', "verbose", &verbose,
+                "be more verbose"),
+       OPT_END()
+       };
+
+       ret = perf_config(perf_ftrace_config, &ftrace);
+       if (ret < 0)
+               return -1;
+
+       argc = parse_options(argc, argv, ftrace_options, ftrace_usage,
+                           PARSE_OPT_STOP_AT_NON_OPTION);
+       if (!argc)
+               usage_with_options(ftrace_usage, ftrace_options);
+
+       ftrace.evlist = perf_evlist__new();
+       if (ftrace.evlist == NULL)
+               return -ENOMEM;
+
+       ret = perf_evlist__create_maps(ftrace.evlist, &ftrace.target);
+       if (ret < 0)
+               goto out_delete_evlist;
+
+       ret = __cmd_ftrace(&ftrace, argc, argv);
+
+out_delete_evlist:
+       perf_evlist__delete(ftrace.evlist);
+
+       return ret;
+}
index 3bdb2c78a21b3f0118d0c7a376222b7bb96c4735..aed0d844e8c271426137abf3b622362913de2340 100644 (file)
@@ -434,7 +434,7 @@ int cmd_help(int argc, const char **argv, const char *prefix __maybe_unused)
        const char * const builtin_help_subcommands[] = {
                "buildid-cache", "buildid-list", "diff", "evlist", "help", "list",
                "record", "report", "bench", "stat", "timechart", "top", "annotate",
-               "script", "sched", "kmem", "lock", "kvm", "test", "inject", "mem", "data",
+               "script", "sched", "kallsyms", "kmem", "lock", "kvm", "test", "inject", "mem", "data",
 #ifdef HAVE_LIBELF_SUPPORT
                "probe",
 #endif
@@ -447,11 +447,13 @@ int cmd_help(int argc, const char **argv, const char *prefix __maybe_unused)
                NULL
        };
        const char *alias;
-       int rc = 0;
+       int rc;
 
        load_command_list("perf-", &main_cmds, &other_cmds);
 
-       perf_config(perf_help_config, &help_format);
+       rc = perf_config(perf_help_config, &help_format);
+       if (rc)
+               return rc;
 
        argc = parse_options_subcommand(argc, argv, builtin_help_options,
                        builtin_help_subcommands, builtin_help_usage, 0);
diff --git a/tools/perf/builtin-kallsyms.c b/tools/perf/builtin-kallsyms.c
new file mode 100644 (file)
index 0000000..224bfc4
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * builtin-kallsyms.c
+ *
+ * Builtin command: Look for a symbol in the running kernel and its modules
+ *
+ * Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+#include "builtin.h"
+#include <linux/compiler.h>
+#include <subcmd/parse-options.h>
+#include "debug.h"
+#include "machine.h"
+#include "symbol.h"
+
+static int __cmd_kallsyms(int argc, const char **argv)
+{
+       int i;
+       struct machine *machine = machine__new_kallsyms();
+
+       if (machine == NULL) {
+               pr_err("Couldn't read /proc/kallsyms\n");
+               return -1;
+       }
+
+       for (i = 0; i < argc; ++i) {
+               struct map *map;
+               struct symbol *symbol = machine__find_kernel_function_by_name(machine, argv[i], &map);
+
+               if (symbol == NULL) {
+                       printf("%s: not found\n", argv[i]);
+                       continue;
+               }
+
+               printf("%s: %s %s %#" PRIx64 "-%#" PRIx64 " (%#" PRIx64 "-%#" PRIx64")\n",
+                       symbol->name, map->dso->short_name, map->dso->long_name,
+                       map->unmap_ip(map, symbol->start), map->unmap_ip(map, symbol->end),
+                       symbol->start, symbol->end);
+       }
+
+       machine__delete(machine);
+       return 0;
+}
+
+int cmd_kallsyms(int argc, const char **argv, const char *prefix __maybe_unused)
+{
+       const struct option options[] = {
+       OPT_INCR('v', "verbose", &verbose, "be more verbose (show counter open errors, etc)"),
+       OPT_END()
+       };
+       const char * const kallsyms_usage[] = {
+               "perf kallsyms [<options>] symbol_name",
+               NULL
+       };
+
+       argc = parse_options(argc, argv, options, kallsyms_usage, 0);
+       if (argc < 1)
+               usage_with_options(kallsyms_usage, options);
+
+       symbol_conf.sort_by_name = true;
+       symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
+       if (symbol__init(NULL) < 0)
+               return -1;
+
+       return __cmd_kallsyms(argc, argv);
+}
index 915869e00d863af03ca409f0f9ffa98147c45385..6da8d083e4e596d821673ed7408c814723029aca 100644 (file)
@@ -1065,7 +1065,7 @@ static void __print_page_alloc_result(struct perf_session *session, int n_lines)
 
                data = rb_entry(next, struct page_stat, node);
                sym = machine__find_kernel_function(machine, data->callsite, &map);
-               if (sym && sym->name)
+               if (sym)
                        caller = sym->name;
                else
                        scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);
@@ -1107,7 +1107,7 @@ static void __print_page_caller_result(struct perf_session *session, int n_lines
 
                data = rb_entry(next, struct page_stat, node);
                sym = machine__find_kernel_function(machine, data->callsite, &map);
-               if (sym && sym->name)
+               if (sym)
                        caller = sym->name;
                else
                        scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);
@@ -1920,10 +1920,12 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
                NULL
        };
        struct perf_session *session;
-       int ret = -1;
        const char errmsg[] = "No %s allocation events found.  Have you run 'perf kmem record --%s'?\n";
+       int ret = perf_config(kmem_config, NULL);
+
+       if (ret)
+               return ret;
 
-       perf_config(kmem_config, NULL);
        argc = parse_options_subcommand(argc, argv, kmem_options,
                                        kmem_subcommands, kmem_usage, 0);
 
@@ -1948,6 +1950,8 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
        if (session == NULL)
                return -1;
 
+       ret = -1;
+
        if (kmem_slab) {
                if (!perf_evlist__find_tracepoint_by_name(session->evlist,
                                                          "kmem:kmalloc")) {
index ba9322ff858bead84884e832f84dc73476c6862a..3b9d98b5feef69182a0036b3c2f57d29a3364fc9 100644 (file)
@@ -14,6 +14,7 @@
 #include "util/parse-events.h"
 #include "util/cache.h"
 #include "util/pmu.h"
+#include "util/debug.h"
 #include <subcmd/parse-options.h>
 
 static bool desc_flag = true;
@@ -29,6 +30,8 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
                            "Print extra event descriptions. --no-desc to not print."),
                OPT_BOOLEAN('v', "long-desc", &long_desc_flag,
                            "Print longer event descriptions."),
+               OPT_INCR(0, "debug", &verbose,
+                            "Enable debugging output"),
                OPT_END()
        };
        const char * const list_usage[] = {
index f87996b0cb299636e5803a699710be944d3924e3..1fcebc31a50810b18c50b1a39b28f90f350a6327 100644 (file)
@@ -552,6 +552,8 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
        OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
                    "Enable kernel symbol demangling"),
        OPT_BOOLEAN(0, "cache", &probe_conf.cache, "Manipulate probe cache"),
+       OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
+                  "Look for files with symbols relative to this directory"),
        OPT_END()
        };
        int ret;
index 4ec10e9427d915a041e0510eaac2ded2daf9fc07..6cd6776052e7a940f78c78d43e623709256a0711 100644 (file)
 #include <asm/bug.h>
 #include <linux/time64.h>
 
+struct switch_output {
+       bool             enabled;
+       bool             signal;
+       unsigned long    size;
+       unsigned long    time;
+       const char      *str;
+       bool             set;
+};
+
 struct record {
        struct perf_tool        tool;
        struct record_opts      opts;
@@ -62,10 +71,33 @@ struct record {
        bool                    no_buildid_cache_set;
        bool                    buildid_all;
        bool                    timestamp_filename;
-       bool                    switch_output;
+       struct switch_output    switch_output;
        unsigned long long      samples;
 };
 
+static volatile int auxtrace_record__snapshot_started;
+static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
+static DEFINE_TRIGGER(switch_output_trigger);
+
+static bool switch_output_signal(struct record *rec)
+{
+       return rec->switch_output.signal &&
+              trigger_is_ready(&switch_output_trigger);
+}
+
+static bool switch_output_size(struct record *rec)
+{
+       return rec->switch_output.size &&
+              trigger_is_ready(&switch_output_trigger) &&
+              (rec->bytes_written >= rec->switch_output.size);
+}
+
+static bool switch_output_time(struct record *rec)
+{
+       return rec->switch_output.time &&
+              trigger_is_ready(&switch_output_trigger);
+}
+
 static int record__write(struct record *rec, void *bf, size_t size)
 {
        if (perf_data_file__write(rec->session->file, bf, size) < 0) {
@@ -74,6 +106,10 @@ static int record__write(struct record *rec, void *bf, size_t size)
        }
 
        rec->bytes_written += size;
+
+       if (switch_output_size(rec))
+               trigger_hit(&switch_output_trigger);
+
        return 0;
 }
 
@@ -193,10 +229,6 @@ static volatile int done;
 static volatile int signr = -1;
 static volatile int child_finished;
 
-static volatile int auxtrace_record__snapshot_started;
-static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
-static DEFINE_TRIGGER(switch_output_trigger);
-
 static void sig_handler(int sig)
 {
        if (sig == SIGCHLD)
@@ -386,7 +418,7 @@ static int record__mmap(struct record *rec)
 
 static int record__open(struct record *rec)
 {
-       char msg[512];
+       char msg[BUFSIZ];
        struct perf_evsel *pos;
        struct perf_evlist *evlist = rec->evlist;
        struct perf_session *session = rec->session;
@@ -623,22 +655,23 @@ record__finish_output(struct record *rec)
 
 static int record__synthesize_workload(struct record *rec, bool tail)
 {
-       struct {
-               struct thread_map map;
-               struct thread_map_data map_data;
-       } thread_map;
+       int err;
+       struct thread_map *thread_map;
 
        if (rec->opts.tail_synthesize != tail)
                return 0;
 
-       thread_map.map.nr = 1;
-       thread_map.map.map[0].pid = rec->evlist->workload.pid;
-       thread_map.map.map[0].comm = NULL;
-       return perf_event__synthesize_thread_map(&rec->tool, &thread_map.map,
+       thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
+       if (thread_map == NULL)
+               return -1;
+
+       err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
                                                 process_synthesized_event,
                                                 &rec->session->machines.host,
                                                 rec->opts.sample_address,
                                                 rec->opts.proc_map_timeout);
+       thread_map__put(thread_map);
+       return err;
 }
 
 static int record__synthesize(struct record *rec, bool tail);
@@ -712,6 +745,7 @@ static void workload_exec_failed_signal(int signo __maybe_unused,
 }
 
 static void snapshot_sig_handler(int sig);
+static void alarm_sig_handler(int sig);
 
 int __weak
 perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
@@ -842,11 +876,11 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
        signal(SIGTERM, sig_handler);
        signal(SIGSEGV, sigsegv_handler);
 
-       if (rec->opts.auxtrace_snapshot_mode || rec->switch_output) {
+       if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
                signal(SIGUSR2, snapshot_sig_handler);
                if (rec->opts.auxtrace_snapshot_mode)
                        trigger_on(&auxtrace_snapshot_trigger);
-               if (rec->switch_output)
+               if (rec->switch_output.enabled)
                        trigger_on(&switch_output_trigger);
        } else {
                signal(SIGUSR2, SIG_IGN);
@@ -1043,6 +1077,10 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
                                err = fd;
                                goto out_child;
                        }
+
+                       /* re-arm the alarm */
+                       if (rec->switch_output.time)
+                               alarm(rec->switch_output.time);
                }
 
                if (hits == rec->samples) {
@@ -1352,6 +1390,78 @@ out_free:
        return ret;
 }
 
+static void switch_output_size_warn(struct record *rec)
+{
+       u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
+       struct switch_output *s = &rec->switch_output;
+
+       wakeup_size /= 2;
+
+       if (s->size < wakeup_size) {
+               char buf[100];
+
+               unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
+               pr_warning("WARNING: switch-output data size lower than "
+                          "wakeup kernel buffer size (%s) "
+                          "expect bigger perf.data sizes\n", buf);
+       }
+}
+
+static int switch_output_setup(struct record *rec)
+{
+       struct switch_output *s = &rec->switch_output;
+       static struct parse_tag tags_size[] = {
+               { .tag  = 'B', .mult = 1       },
+               { .tag  = 'K', .mult = 1 << 10 },
+               { .tag  = 'M', .mult = 1 << 20 },
+               { .tag  = 'G', .mult = 1 << 30 },
+               { .tag  = 0 },
+       };
+       static struct parse_tag tags_time[] = {
+               { .tag  = 's', .mult = 1        },
+               { .tag  = 'm', .mult = 60       },
+               { .tag  = 'h', .mult = 60*60    },
+               { .tag  = 'd', .mult = 60*60*24 },
+               { .tag  = 0 },
+       };
+       unsigned long val;
+
+       if (!s->set)
+               return 0;
+
+       if (!strcmp(s->str, "signal")) {
+               s->signal = true;
+               pr_debug("switch-output with SIGUSR2 signal\n");
+               goto enabled;
+       }
+
+       val = parse_tag_value(s->str, tags_size);
+       if (val != (unsigned long) -1) {
+               s->size = val;
+               pr_debug("switch-output with %s size threshold\n", s->str);
+               goto enabled;
+       }
+
+       val = parse_tag_value(s->str, tags_time);
+       if (val != (unsigned long) -1) {
+               s->time = val;
+               pr_debug("switch-output with %s time threshold (%lu seconds)\n",
+                        s->str, s->time);
+               goto enabled;
+       }
+
+       return -1;
+
+enabled:
+       rec->timestamp_filename = true;
+       s->enabled              = true;
+
+       if (s->size && !rec->opts.no_buffering)
+               switch_output_size_warn(rec);
+
+       return 0;
+}
+
 static const char * const __record_usage[] = {
        "perf record [<options>] [<command>]",
        "perf record [<options>] -- <command> [<options>]",
@@ -1519,8 +1629,10 @@ static struct option __record_options[] = {
                    "Record build-id of all DSOs regardless of hits"),
        OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
                    "append timestamp to output filename"),
-       OPT_BOOLEAN(0, "switch-output", &record.switch_output,
-                   "Switch output when receive SIGUSR2"),
+       OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
+                         &record.switch_output.set, "signal,size,time",
+                         "Switch output when receive SIGUSR2 or cross size,time threshold",
+                         "signal"),
        OPT_BOOLEAN(0, "dry-run", &dry_run,
                    "Parse options then exit"),
        OPT_END()
@@ -1559,7 +1671,9 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
        if (rec->evlist == NULL)
                return -ENOMEM;
 
-       perf_config(perf_record_config, rec);
+       err = perf_config(perf_record_config, rec);
+       if (err)
+               return err;
 
        argc = parse_options(argc, argv, record_options, record_usage,
                            PARSE_OPT_STOP_AT_NON_OPTION);
@@ -1578,8 +1692,15 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
                return -EINVAL;
        }
 
-       if (rec->switch_output)
-               rec->timestamp_filename = true;
+       if (switch_output_setup(rec)) {
+               parse_options_usage(record_usage, record_options, "switch-output", 0);
+               return -EINVAL;
+       }
+
+       if (rec->switch_output.time) {
+               signal(SIGALRM, alarm_sig_handler);
+               alarm(rec->switch_output.time);
+       }
 
        if (!rec->itr) {
                rec->itr = auxtrace_record__init(rec->evlist, &err);
@@ -1629,7 +1750,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
 
        if (rec->no_buildid_cache || rec->no_buildid) {
                disable_buildid_cache();
-       } else if (rec->switch_output) {
+       } else if (rec->switch_output.enabled) {
                /*
                 * In 'perf record --switch-output', disable buildid
                 * generation by default to reduce data file switching
@@ -1721,6 +1842,8 @@ out:
 
 static void snapshot_sig_handler(int sig __maybe_unused)
 {
+       struct record *rec = &record;
+
        if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
                trigger_hit(&auxtrace_snapshot_trigger);
                auxtrace_record__snapshot_started = 1;
@@ -1728,6 +1851,14 @@ static void snapshot_sig_handler(int sig __maybe_unused)
                        trigger_error(&auxtrace_snapshot_trigger);
        }
 
-       if (trigger_is_ready(&switch_output_trigger))
+       if (switch_output_signal(rec))
+               trigger_hit(&switch_output_trigger);
+}
+
+static void alarm_sig_handler(int sig __maybe_unused)
+{
+       struct record *rec = &record;
+
+       if (switch_output_time(rec))
                trigger_hit(&switch_output_trigger);
 }
index 06cc759a459758472fc73925222b3eadd28aeee0..dbd7fa0288616e3c29003d9de62e8a62bf068051 100644 (file)
@@ -847,7 +847,9 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
        if (ret < 0)
                return ret;
 
-       perf_config(report__config, &report);
+       ret = perf_config(report__config, &report);
+       if (ret)
+               return ret;
 
        argc = parse_options(argc, argv, options, report_usage, 0);
        if (argc) {
index 5b134b0d1ff37e42a1b82b5973528c4003bb80f3..270eb2d8ca6b24bb6b7c74ff0b068417e655d8e7 100644 (file)
@@ -77,6 +77,22 @@ struct sched_atom {
 
 #define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
 
+/* task state bitmask, copied from include/linux/sched.h */
+#define TASK_RUNNING           0
+#define TASK_INTERRUPTIBLE     1
+#define TASK_UNINTERRUPTIBLE   2
+#define __TASK_STOPPED         4
+#define __TASK_TRACED          8
+/* in tsk->exit_state */
+#define EXIT_DEAD              16
+#define EXIT_ZOMBIE            32
+#define EXIT_TRACE             (EXIT_ZOMBIE | EXIT_DEAD)
+/* in tsk->state again */
+#define TASK_DEAD              64
+#define TASK_WAKEKILL          128
+#define TASK_WAKING            256
+#define TASK_PARKED            512
+
 enum thread_state {
        THREAD_SLEEPING = 0,
        THREAD_WAIT_CPU,
@@ -206,6 +222,7 @@ struct perf_sched {
        bool            show_cpu_visual;
        bool            show_wakeups;
        bool            show_migrations;
+       bool            show_state;
        u64             skipped_samples;
        const char      *time_str;
        struct perf_time_interval ptime;
@@ -216,13 +233,20 @@ struct perf_sched {
 struct thread_runtime {
        u64 last_time;      /* time of previous sched in/out event */
        u64 dt_run;         /* run time */
-       u64 dt_wait;        /* time between CPU access (off cpu) */
+       u64 dt_sleep;       /* time between CPU access by sleep (off cpu) */
+       u64 dt_iowait;      /* time between CPU access by iowait (off cpu) */
+       u64 dt_preempt;     /* time between CPU access by preempt (off cpu) */
        u64 dt_delay;       /* time between wakeup and sched-in */
        u64 ready_to_run;   /* time of wakeup */
 
        struct stats run_stats;
        u64 total_run_time;
+       u64 total_sleep_time;
+       u64 total_iowait_time;
+       u64 total_preempt_time;
+       u64 total_delay_time;
 
+       int last_state;
        u64 migrations;
 };
 
@@ -1821,6 +1845,9 @@ static void timehist_header(struct perf_sched *sched)
        printf(" %-*s  %9s  %9s  %9s", comm_width,
                "task name", "wait time", "sch delay", "run time");
 
+       if (sched->show_state)
+               printf("  %s", "state");
+
        printf("\n");
 
        /*
@@ -1831,9 +1858,14 @@ static void timehist_header(struct perf_sched *sched)
        if (sched->show_cpu_visual)
                printf(" %*s ", ncpus, "");
 
-       printf(" %-*s  %9s  %9s  %9s\n", comm_width,
+       printf(" %-*s  %9s  %9s  %9s", comm_width,
               "[tid/pid]", "(msec)", "(msec)", "(msec)");
 
+       if (sched->show_state)
+               printf("  %5s", "");
+
+       printf("\n");
+
        /*
         * separator
         */
@@ -1846,18 +1878,34 @@ static void timehist_header(struct perf_sched *sched)
                graph_dotted_line, graph_dotted_line, graph_dotted_line,
                graph_dotted_line);
 
+       if (sched->show_state)
+               printf("  %.5s", graph_dotted_line);
+
        printf("\n");
 }
 
+static char task_state_char(struct thread *thread, int state)
+{
+       static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
+       unsigned bit = state ? ffs(state) : 0;
+
+       /* 'I' for idle */
+       if (thread->tid == 0)
+               return 'I';
+
+       return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
+}
+
 static void timehist_print_sample(struct perf_sched *sched,
                                  struct perf_sample *sample,
                                  struct addr_location *al,
                                  struct thread *thread,
-                                 u64 t)
+                                 u64 t, int state)
 {
        struct thread_runtime *tr = thread__priv(thread);
        u32 max_cpus = sched->max_cpu + 1;
        char tstr[64];
+       u64 wait_time;
 
        timestamp__scnprintf_usec(t, tstr, sizeof(tstr));
        printf("%15s [%04d] ", tstr, sample->cpu);
@@ -1880,10 +1928,15 @@ static void timehist_print_sample(struct perf_sched *sched,
 
        printf(" %-*s ", comm_width, timehist_get_commstr(thread));
 
-       print_sched_time(tr->dt_wait, 6);
+       wait_time = tr->dt_sleep + tr->dt_iowait + tr->dt_preempt;
+       print_sched_time(wait_time, 6);
+
        print_sched_time(tr->dt_delay, 6);
        print_sched_time(tr->dt_run, 6);
 
+       if (sched->show_state)
+               printf(" %5c ", task_state_char(thread, state));
+
        if (sched->show_wakeups)
                printf("  %-*s", comm_width, "");
 
@@ -1930,8 +1983,11 @@ static void timehist_update_runtime_stats(struct thread_runtime *r,
                                         u64 t, u64 tprev)
 {
        r->dt_delay   = 0;
-       r->dt_wait    = 0;
+       r->dt_sleep   = 0;
+       r->dt_iowait  = 0;
+       r->dt_preempt = 0;
        r->dt_run     = 0;
+
        if (tprev) {
                r->dt_run = t - tprev;
                if (r->ready_to_run) {
@@ -1943,12 +1999,25 @@ static void timehist_update_runtime_stats(struct thread_runtime *r,
 
                if (r->last_time > tprev)
                        pr_debug("time travel: last sched out time for task > previous sched_switch event\n");
-               else if (r->last_time)
-                       r->dt_wait = tprev - r->last_time;
+               else if (r->last_time) {
+                       u64 dt_wait = tprev - r->last_time;
+
+                       if (r->last_state == TASK_RUNNING)
+                               r->dt_preempt = dt_wait;
+                       else if (r->last_state == TASK_UNINTERRUPTIBLE)
+                               r->dt_iowait = dt_wait;
+                       else
+                               r->dt_sleep = dt_wait;
+               }
        }
 
        update_stats(&r->run_stats, r->dt_run);
-       r->total_run_time += r->dt_run;
+
+       r->total_run_time     += r->dt_run;
+       r->total_delay_time   += r->dt_delay;
+       r->total_sleep_time   += r->dt_sleep;
+       r->total_iowait_time  += r->dt_iowait;
+       r->total_preempt_time += r->dt_preempt;
 }
 
 static bool is_idle_sample(struct perf_sample *sample,
@@ -1998,7 +2067,7 @@ static void save_task_callchain(struct perf_sched *sched,
                        break;
 
                sym = node->sym;
-               if (sym && sym->name) {
+               if (sym) {
                        if (!strcmp(sym->name, "schedule") ||
                            !strcmp(sym->name, "__schedule") ||
                            !strcmp(sym->name, "preempt_schedule"))
@@ -2373,6 +2442,8 @@ static int timehist_sched_change_event(struct perf_tool *tool,
        struct thread_runtime *tr = NULL;
        u64 tprev, t = sample->time;
        int rc = 0;
+       int state = perf_evsel__intval(evsel, sample, "prev_state");
+
 
        if (machine__resolve(machine, &al, sample) < 0) {
                pr_err("problem processing %d event. skipping it\n",
@@ -2447,8 +2518,10 @@ static int timehist_sched_change_event(struct perf_tool *tool,
                         * time.  we only care total run time and run stat.
                         */
                        last_tr->dt_run = 0;
-                       last_tr->dt_wait = 0;
                        last_tr->dt_delay = 0;
+                       last_tr->dt_sleep = 0;
+                       last_tr->dt_iowait = 0;
+                       last_tr->dt_preempt = 0;
 
                        if (itr->cursor.nr)
                                callchain_append(&itr->callchain, &itr->cursor, t - tprev);
@@ -2458,7 +2531,7 @@ static int timehist_sched_change_event(struct perf_tool *tool,
        }
 
        if (!sched->summary_only)
-               timehist_print_sample(sched, sample, &al, thread, t);
+               timehist_print_sample(sched, sample, &al, thread, t, state);
 
 out:
        if (sched->hist_time.start == 0 && t >= ptime->start)
@@ -2470,6 +2543,9 @@ out:
                /* time of this sched_switch event becomes last time task seen */
                tr->last_time = sample->time;
 
+               /* last state is used to determine where to account wait time */
+               tr->last_state = state;
+
                /* sched out event for task so reset ready to run time */
                tr->ready_to_run = 0;
        }
@@ -2526,7 +2602,26 @@ static void print_thread_runtime(struct thread *t,
        printf("\n");
 }
 
+static void print_thread_waittime(struct thread *t,
+                                 struct thread_runtime *r)
+{
+       printf("%*s   %5d  %9" PRIu64 " ",
+              comm_width, timehist_get_commstr(t), t->ppid,
+              (u64) r->run_stats.n);
+
+       print_sched_time(r->total_run_time, 8);
+       print_sched_time(r->total_sleep_time, 6);
+       printf(" ");
+       print_sched_time(r->total_iowait_time, 6);
+       printf(" ");
+       print_sched_time(r->total_preempt_time, 6);
+       printf(" ");
+       print_sched_time(r->total_delay_time, 6);
+       printf("\n");
+}
+
 struct total_run_stats {
+       struct perf_sched *sched;
        u64  sched_count;
        u64  task_count;
        u64  total_run_time;
@@ -2545,7 +2640,11 @@ static int __show_thread_runtime(struct thread *t, void *priv)
                stats->task_count++;
                stats->sched_count += r->run_stats.n;
                stats->total_run_time += r->total_run_time;
-               print_thread_runtime(t, r);
+
+               if (stats->sched->show_state)
+                       print_thread_waittime(t, r);
+               else
+                       print_thread_runtime(t, r);
        }
 
        return 0;
@@ -2633,18 +2732,24 @@ static void timehist_print_summary(struct perf_sched *sched,
        u64 hist_time = sched->hist_time.end - sched->hist_time.start;
 
        memset(&totals, 0, sizeof(totals));
+       totals.sched = sched;
 
        if (sched->idle_hist) {
                printf("\nIdle-time summary\n");
                printf("%*s  parent  sched-out  ", comm_width, "comm");
                printf("  idle-time   min-idle    avg-idle    max-idle  stddev  migrations\n");
+       } else if (sched->show_state) {
+               printf("\nWait-time summary\n");
+               printf("%*s  parent   sched-in  ", comm_width, "comm");
+               printf("   run-time      sleep      iowait     preempt       delay\n");
        } else {
                printf("\nRuntime summary\n");
                printf("%*s  parent   sched-in  ", comm_width, "comm");
                printf("   run-time    min-run     avg-run     max-run  stddev  migrations\n");
        }
        printf("%*s            (count)  ", comm_width, "");
-       printf("     (msec)     (msec)      (msec)      (msec)       %%\n");
+       printf("     (msec)     (msec)      (msec)      (msec)       %s\n",
+              sched->show_state ? "(msec)" : "%");
        printf("%.117s\n", graph_dotted_line);
 
        machine__for_each_thread(m, show_thread_runtime, &totals);
@@ -3240,6 +3345,7 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
        OPT_BOOLEAN('I', "idle-hist", &sched.idle_hist, "Show idle events only"),
        OPT_STRING(0, "time", &sched.time_str, "str",
                   "Time span for analysis (start,stop)"),
+       OPT_BOOLEAN(0, "state", &sched.show_state, "Show task state when sched-out"),
        OPT_PARENT(sched_options)
        };
 
index 2f3ff69fc4e7f0e40c50b2d10d1fc73396f74d4d..c0783b4f7b6c650e2c35ffebfe1e2d7bb3e51e61 100644 (file)
@@ -2180,7 +2180,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
                    "Show the mmap events"),
        OPT_BOOLEAN('\0', "show-switch-events", &script.show_switch_events,
                    "Show context switch events (if recorded)"),
-       OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"),
+       OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
        OPT_BOOLEAN(0, "ns", &nanosecs,
                    "Use 9 decimal places when displaying time"),
        OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
@@ -2212,6 +2212,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
                             PARSE_OPT_STOP_AT_NON_OPTION);
 
        file.path = input_name;
+       file.force = symbol_conf.force;
 
        if (argc > 1 && !strncmp(argv[0], "rec", strlen("rec"))) {
                rec_script_path = get_script_path(argv[1], RECORD_SUFFIX);
index a02f2e9656284fa5d5c4a4b9d45f55c28e167e89..f28719178b519b92be214b2fe0405205e6776652 100644 (file)
@@ -533,7 +533,7 @@ static int store_counter_ids(struct perf_evsel *counter)
 static int __run_perf_stat(int argc, const char **argv)
 {
        int interval = stat_config.interval;
-       char msg[512];
+       char msg[BUFSIZ];
        unsigned long long t0, t1;
        struct perf_evsel *counter;
        struct timespec ts;
index 3df4178ba378167aa6988f8b5a19f470e1607eb0..5a7fd7af3a6de39d4a0d5c5ed5a758c92aeb88f3 100644 (file)
@@ -643,7 +643,7 @@ repeat:
                case -1:
                        if (errno == EINTR)
                                continue;
-                       /* Fall trhu */
+                       __fallthrough;
                default:
                        c = getc(stdin);
                        tcsetattr(0, TCSAFLUSH, &save);
@@ -859,7 +859,7 @@ static void perf_top__mmap_read(struct perf_top *top)
 
 static int perf_top__start_counters(struct perf_top *top)
 {
-       char msg[512];
+       char msg[BUFSIZ];
        struct perf_evsel *counter;
        struct perf_evlist *evlist = top->evlist;
        struct record_opts *opts = &top->record_opts;
@@ -1216,7 +1216,9 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
        if (top.evlist == NULL)
                return -ENOMEM;
 
-       perf_config(perf_top_config, &top);
+       status = perf_config(perf_top_config, &top);
+       if (status)
+               return status;
 
        argc = parse_options(argc, argv, options, top_usage, 0);
        if (argc)
index 206bf72b77fcb9c795486ec9fba184b9d6321b7b..40ef9b293d1b4ffa0213108ca3ec391621c1267d 100644 (file)
@@ -40,6 +40,7 @@
 
 #include <libaudit.h> /* FIXME: Still needed for audit_errno_to_name */
 #include <stdlib.h>
+#include <string.h>
 #include <linux/err.h>
 #include <linux/filter.h>
 #include <linux/audit.h>
@@ -2699,6 +2700,91 @@ static void evlist__set_evsel_handler(struct perf_evlist *evlist, void *handler)
                evsel->handler = handler;
 }
 
+/*
+ * XXX: Hackish, just splitting the combined -e+--event (syscalls
+ * (raw_syscalls:{sys_{enter,exit}} + events (tracepoints, HW, SW, etc) to use
+ * existing facilities unchanged (trace->ev_qualifier + parse_options()).
+ *
+ * It'd be better to introduce a parse_options() variant that would return a
+ * list with the terms it didn't match to an event...
+ */
+static int trace__parse_events_option(const struct option *opt, const char *str,
+                                     int unset __maybe_unused)
+{
+       struct trace *trace = (struct trace *)opt->value;
+       const char *s = str;
+       char *sep = NULL, *lists[2] = { NULL, NULL, };
+       int len = strlen(str), err = -1, list;
+       char *strace_groups_dir = system_path(STRACE_GROUPS_DIR);
+       char group_name[PATH_MAX];
+
+       if (strace_groups_dir == NULL)
+               return -1;
+
+       if (*s == '!') {
+               ++s;
+               trace->not_ev_qualifier = true;
+       }
+
+       while (1) {
+               if ((sep = strchr(s, ',')) != NULL)
+                       *sep = '\0';
+
+               list = 0;
+               if (syscalltbl__id(trace->sctbl, s) >= 0) {
+                       list = 1;
+               } else {
+                       path__join(group_name, sizeof(group_name), strace_groups_dir, s);
+                       if (access(group_name, R_OK) == 0)
+                               list = 1;
+               }
+
+               if (lists[list]) {
+                       sprintf(lists[list] + strlen(lists[list]), ",%s", s);
+               } else {
+                       lists[list] = malloc(len);
+                       if (lists[list] == NULL)
+                               goto out;
+                       strcpy(lists[list], s);
+               }
+
+               if (!sep)
+                       break;
+
+               *sep = ',';
+               s = sep + 1;
+       }
+
+       if (lists[1] != NULL) {
+               struct strlist_config slist_config = {
+                       .dirname = strace_groups_dir,
+               };
+
+               trace->ev_qualifier = strlist__new(lists[1], &slist_config);
+               if (trace->ev_qualifier == NULL) {
+                       fputs("Not enough memory to parse event qualifier", trace->output);
+                       goto out;
+               }
+
+               if (trace__validate_ev_qualifier(trace))
+                       goto out;
+       }
+
+       err = 0;
+
+       if (lists[0]) {
+               struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event",
+                                              "event selector. use 'perf list' to list available events",
+                                              parse_events_option);
+               err = parse_events_option(&o, lists[0], 0);
+       }
+out:
+       if (sep)
+               *sep = ',';
+
+       return err;
+}
+
 int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
 {
        const char *trace_usage[] = {
@@ -2730,15 +2816,15 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
                .max_stack = UINT_MAX,
        };
        const char *output_name = NULL;
-       const char *ev_qualifier_str = NULL;
        const struct option trace_options[] = {
-       OPT_CALLBACK(0, "event", &trace.evlist, "event",
-                    "event selector. use 'perf list' to list available events",
-                    parse_events_option),
+       OPT_CALLBACK('e', "event", &trace, "event",
+                    "event/syscall selector. use 'perf list' to list available events",
+                    trace__parse_events_option),
        OPT_BOOLEAN(0, "comm", &trace.show_comm,
                    "show the thread COMM next to its id"),
        OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
-       OPT_STRING('e', "expr", &ev_qualifier_str, "expr", "list of syscalls to trace"),
+       OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
+                    trace__parse_events_option),
        OPT_STRING('o', "output", &output_name, "file", "output file name"),
        OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
        OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
@@ -2863,7 +2949,7 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
                return -1;
        }
 
-       if (!trace.trace_syscalls && ev_qualifier_str) {
+       if (!trace.trace_syscalls && trace.ev_qualifier) {
                pr_err("The -e option can't be used with --no-syscalls.\n");
                goto out;
        }
@@ -2878,28 +2964,6 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
 
        trace.open_id = syscalltbl__id(trace.sctbl, "open");
 
-       if (ev_qualifier_str != NULL) {
-               const char *s = ev_qualifier_str;
-               struct strlist_config slist_config = {
-                       .dirname = system_path(STRACE_GROUPS_DIR),
-               };
-
-               trace.not_ev_qualifier = *s == '!';
-               if (trace.not_ev_qualifier)
-                       ++s;
-               trace.ev_qualifier = strlist__new(s, &slist_config);
-               if (trace.ev_qualifier == NULL) {
-                       fputs("Not enough memory to parse event qualifier",
-                             trace.output);
-                       err = -ENOMEM;
-                       goto out_close;
-               }
-
-               err = trace__validate_ev_qualifier(&trace);
-               if (err)
-                       goto out_close;
-       }
-
        err = target__validate(&trace.opts.target);
        if (err) {
                target__strerror(&trace.opts.target, err, bf, sizeof(bf));
index 0bcf68e98ccc2e992e005b41f0e6a9fcaf9b4e27..036e1e35b1a8fa5ed64439cb5c73d30558ed71b4 100644 (file)
@@ -23,6 +23,7 @@ int cmd_diff(int argc, const char **argv, const char *prefix);
 int cmd_evlist(int argc, const char **argv, const char *prefix);
 int cmd_help(int argc, const char **argv, const char *prefix);
 int cmd_sched(int argc, const char **argv, const char *prefix);
+int cmd_kallsyms(int argc, const char **argv, const char *prefix);
 int cmd_list(int argc, const char **argv, const char *prefix);
 int cmd_record(int argc, const char **argv, const char *prefix);
 int cmd_report(int argc, const char **argv, const char *prefix);
@@ -40,6 +41,7 @@ int cmd_trace(int argc, const char **argv, const char *prefix);
 int cmd_inject(int argc, const char **argv, const char *prefix);
 int cmd_mem(int argc, const char **argv, const char *prefix);
 int cmd_data(int argc, const char **argv, const char *prefix);
+int cmd_ftrace(int argc, const char **argv, const char *prefix);
 
 int find_scripts(char **scripts_array, char **scripts_path_array);
 #endif
index ab5cbaa170d0d882510e58d1d4e57e44c8843b3f..ac3efd396a727298802b49e6a24e530f696fcdf7 100644 (file)
@@ -11,7 +11,9 @@ perf-data                     mainporcelain common
 perf-diff                      mainporcelain common
 perf-config                    mainporcelain common
 perf-evlist                    mainporcelain common
+perf-ftrace                    mainporcelain common
 perf-inject                    mainporcelain common
+perf-kallsyms                  mainporcelain common
 perf-kmem                      mainporcelain common
 perf-kvm                       mainporcelain common
 perf-list                      mainporcelain common
index aa23b3347d6b1f36dbe59f072d0f61caa7479d3d..6d5479e03e0dbc080531f3037d5fe5a6644bc3c9 100644 (file)
@@ -29,7 +29,6 @@ const char perf_usage_string[] =
 const char perf_more_info_string[] =
        "See 'perf help COMMAND' for more information on a specific command.";
 
-int use_browser = -1;
 static int use_pager = -1;
 const char *input_name;
 
@@ -47,6 +46,7 @@ static struct cmd_struct commands[] = {
        { "diff",       cmd_diff,       0 },
        { "evlist",     cmd_evlist,     0 },
        { "help",       cmd_help,       0 },
+       { "kallsyms",   cmd_kallsyms,   0 },
        { "list",       cmd_list,       0 },
        { "record",     cmd_record,     0 },
        { "report",     cmd_report,     0 },
@@ -71,6 +71,7 @@ static struct cmd_struct commands[] = {
        { "inject",     cmd_inject,     0 },
        { "mem",        cmd_mem,        0 },
        { "data",       cmd_data,       0 },
+       { "ftrace",     cmd_ftrace,     0 },
 };
 
 struct pager_config {
@@ -89,11 +90,12 @@ static int pager_command_config(const char *var, const char *value, void *data)
 /* returns 0 for "no pager", 1 for "use pager", and -1 for "not specified" */
 int check_pager_config(const char *cmd)
 {
+       int err;
        struct pager_config c;
        c.cmd = cmd;
        c.val = -1;
-       perf_config(pager_command_config, &c);
-       return c.val;
+       err = perf_config(pager_command_config, &c);
+       return err ?: c.val;
 }
 
 static int browser_command_config(const char *var, const char *value, void *data)
@@ -112,11 +114,12 @@ static int browser_command_config(const char *var, const char *value, void *data
  */
 static int check_browser_config(const char *cmd)
 {
+       int err;
        struct pager_config c;
        c.cmd = cmd;
        c.val = -1;
-       perf_config(browser_command_config, &c);
-       return c.val;
+       err = perf_config(browser_command_config, &c);
+       return err ?: c.val;
 }
 
 static void commit_pager_choice(void)
@@ -329,8 +332,6 @@ static int handle_alias(int *argcp, const char ***argv)
        return ret;
 }
 
-const char perf_version_string[] = PERF_VERSION;
-
 #define RUN_SETUP      (1<<0)
 #define USE_PAGER      (1<<1)
 
@@ -510,6 +511,7 @@ static void cache_line_size(int *cacheline_sizep)
 
 int main(int argc, const char **argv)
 {
+       int err;
        const char *cmd;
        char sbuf[STRERR_BUFSIZE];
        int value;
@@ -535,7 +537,9 @@ int main(int argc, const char **argv)
        srandom(time(NULL));
 
        perf_config__init();
-       perf_config(perf_default_config, NULL);
+       err = perf_config(perf_default_config, NULL);
+       if (err)
+               return err;
        set_buildid_dir(NULL);
 
        /* get debugfs/tracefs mount point from /proc/mounts */
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-cache.json b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-cache.json
new file mode 100644 (file)
index 0000000..076459c
--- /dev/null
@@ -0,0 +1,317 @@
+[
+    {
+        "BriefDescription": "Uncore cache clock ticks. Derived from unc_c_clockticks",
+        "Counter": "0,1,2,3",
+        "EventName": "UNC_C_CLOCKTICKS",
+        "PerPkg": "1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "All LLC Misses (code+ data rd + data wr - including demand and prefetch). Derived from unc_c_llc_lookup.any",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x34",
+        "EventName": "UNC_C_LLC_LOOKUP.ANY",
+        "Filter": "filter_state=0x1",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x11",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "M line evictions from LLC (writebacks to memory). Derived from unc_c_llc_victims.m_state",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x37",
+        "EventName": "UNC_C_LLC_VICTIMS.M_STATE",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "LLC misses - demand and prefetch data reads - excludes LLC prefetches. Derived from unc_c_tor_inserts.miss_opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.DATA_READ",
+        "Filter": "filter_opc=0x182",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "LLC misses - Uncacheable reads (from cpu) . Derived from unc_c_tor_inserts.miss_opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.UNCACHEABLE",
+        "Filter": "filter_opc=0x187",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "MMIO reads. Derived from unc_c_tor_inserts.miss_opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.MMIO_READ",
+        "Filter": "filter_opc=0x187,filter_nc=1",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "MMIO writes. Derived from unc_c_tor_inserts.miss_opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.MMIO_WRITE",
+        "Filter": "filter_opc=0x18f,filter_nc=1",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "LLC prefetch misses for RFO. Derived from unc_c_tor_inserts.miss_opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.RFO_LLC_PREFETCH",
+        "Filter": "filter_opc=0x190",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "LLC prefetch misses for code reads. Derived from unc_c_tor_inserts.miss_opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.CODE_LLC_PREFETCH",
+        "Filter": "filter_opc=0x191",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "LLC prefetch misses for data reads. Derived from unc_c_tor_inserts.miss_opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.DATA_LLC_PREFETCH",
+        "Filter": "filter_opc=0x192",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "LLC misses for PCIe read current. Derived from unc_c_tor_inserts.miss_opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.PCIE_READ",
+        "Filter": "filter_opc=0x19e",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "ItoM write misses (as part of fast string memcpy stores) + PCIe full line writes. Derived from unc_c_tor_inserts.miss_opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.PCIE_WRITE",
+        "Filter": "filter_opc=0x1c8",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "PCIe write misses (full cache line). Derived from unc_c_tor_inserts.miss_opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.PCIE_NON_SNOOP_WRITE",
+        "Filter": "filter_opc=0x1c8,filter_tid=0x3e",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "PCIe writes (partial cache line). Derived from unc_c_tor_inserts.opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.PCIE_NS_PARTIAL_WRITE",
+        "Filter": "filter_opc=0x180,filter_tid=0x3e",
+        "PerPkg": "1",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "L2 demand and L2 prefetch code references to LLC. Derived from unc_c_tor_inserts.opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.CODE_LLC_PREFETCH",
+        "Filter": "filter_opc=0x181",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "Streaming stores (full cache line). Derived from unc_c_tor_inserts.opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.STREAMING_FULL",
+        "Filter": "filter_opc=0x18c",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "Streaming stores (partial cache line). Derived from unc_c_tor_inserts.opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
+        "Filter": "filter_opc=0x18d",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "PCIe read current. Derived from unc_c_tor_inserts.opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.PCIE_READ",
+        "Filter": "filter_opc=0x19e",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "PCIe write references (full cache line). Derived from unc_c_tor_inserts.opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.PCIE_WRITE",
+        "Filter": "filter_opc=0x1c8,filter_tid=0x3e",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "Occupancy counter for LLC data reads (demand and L2 prefetch). Derived from unc_c_tor_occupancy.miss_opcode",
+        "EventCode": "0x36",
+        "EventName": "UNC_C_TOR_OCCUPANCY.LLC_DATA_READ",
+        "Filter": "filter_opc=0x182",
+        "PerPkg": "1",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "read requests to home agent. Derived from unc_h_requests.reads",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1",
+        "EventName": "UNC_H_REQUESTS.READS",
+        "PerPkg": "1",
+        "UMask": "0x3",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "read requests to local home agent. Derived from unc_h_requests.reads_local",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1",
+        "EventName": "UNC_H_REQUESTS.READS_LOCAL",
+        "PerPkg": "1",
+        "UMask": "0x1",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "read requests to remote home agent. Derived from unc_h_requests.reads_remote",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1",
+        "EventName": "UNC_H_REQUESTS.READS_REMOTE",
+        "PerPkg": "1",
+        "UMask": "0x2",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "write requests to home agent. Derived from unc_h_requests.writes",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1",
+        "EventName": "UNC_H_REQUESTS.WRITES",
+        "PerPkg": "1",
+        "UMask": "0xC",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "write requests to local home agent. Derived from unc_h_requests.writes_local",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1",
+        "EventName": "UNC_H_REQUESTS.WRITES_LOCAL",
+        "PerPkg": "1",
+        "UMask": "0x4",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "write requests to remote home agent. Derived from unc_h_requests.writes_remote",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1",
+        "EventName": "UNC_H_REQUESTS.WRITES_REMOTE",
+        "PerPkg": "1",
+        "UMask": "0x8",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "Conflict requests (requests for same address from multiple agents simultaneously). Derived from unc_h_snoop_resp.rspcnflct",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x21",
+        "EventName": "UNC_H_SNOOP_RESP.RSPCNFLCT",
+        "PerPkg": "1",
+        "UMask": "0x40",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "M line forwarded from remote cache along with writeback to memory. Derived from unc_h_snoop_resp.rsp_fwd_wb",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x21",
+        "EventName": "UNC_H_SNOOP_RESP.RSP_FWD_WB",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x20",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "M line forwarded from remote cache with no writeback to memory. Derived from unc_h_snoop_resp.rspifwd",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x21",
+        "EventName": "UNC_H_SNOOP_RESP.RSPIFWD",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x4",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "Shared line response from remote cache. Derived from unc_h_snoop_resp.rsps",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x21",
+        "EventName": "UNC_H_SNOOP_RESP.RSPS",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x2",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "Shared line forwarded from remote cache. Derived from unc_h_snoop_resp.rspsfwd",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x21",
+        "EventName": "UNC_H_SNOOP_RESP.RSPSFWD",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x8",
+        "Unit": "HA"
+    }
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-memory.json b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-memory.json
new file mode 100644 (file)
index 0000000..d17dc23
--- /dev/null
@@ -0,0 +1,83 @@
+[
+    {
+        "BriefDescription": "read requests to memory controller. Derived from unc_m_cas_count.rd",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x4",
+        "EventName": "UNC_M_CAS_COUNT.RD",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x4",
+        "EventName": "UNC_M_CAS_COUNT.WR",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0xC",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Memory controller clock ticks. Derived from unc_m_clockticks",
+        "Counter": "0,1,2,3",
+        "EventName": "UNC_M_CLOCKTICKS",
+        "PerPkg": "1",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Cycles where DRAM ranks are in power down (CKE) mode. Derived from unc_m_power_channel_ppd",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x85",
+        "EventName": "UNC_M_POWER_CHANNEL_PPD",
+        "MetricExpr": "(UNC_M_POWER_CHANNEL_PPD / UNC_M_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Cycles all ranks are in critical thermal throttle. Derived from unc_m_power_critical_throttle_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x86",
+        "EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES",
+        "MetricExpr": "(UNC_M_POWER_CRITICAL_THROTTLE_CYCLES / UNC_M_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Cycles Memory is in self refresh power mode. Derived from unc_m_power_self_refresh",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x43",
+        "EventName": "UNC_M_POWER_SELF_REFRESH",
+        "MetricExpr": "(UNC_M_POWER_SELF_REFRESH / UNC_M_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Pre-charges due to page misses. Derived from unc_m_pre_count.page_miss",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x2",
+        "EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
+        "PerPkg": "1",
+        "UMask": "0x1",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Pre-charge for reads. Derived from unc_m_pre_count.rd",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x2",
+        "EventName": "UNC_M_PRE_COUNT.RD",
+        "PerPkg": "1",
+        "UMask": "0x4",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Pre-charge for writes. Derived from unc_m_pre_count.wr",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x2",
+        "EventName": "UNC_M_PRE_COUNT.WR",
+        "PerPkg": "1",
+        "UMask": "0x8",
+        "Unit": "iMC"
+    }
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-power.json b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-power.json
new file mode 100644 (file)
index 0000000..b44d430
--- /dev/null
@@ -0,0 +1,84 @@
+[
+    {
+        "BriefDescription": "PCU clock ticks. Use to get percentages of PCU cycles events. Derived from unc_p_clockticks",
+        "Counter": "0,1,2,3",
+        "EventName": "UNC_P_CLOCKTICKS",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "C0 and C1. Derived from unc_p_power_state_occupancy.cores_c0",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x80",
+        "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
+        "Filter": "occ_sel=1",
+        "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C0 / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "C3. Derived from unc_p_power_state_occupancy.cores_c3",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x80",
+        "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
+        "Filter": "occ_sel=2",
+        "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C3 / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "C6 and C7. Derived from unc_p_power_state_occupancy.cores_c6",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x80",
+        "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
+        "Filter": "occ_sel=3",
+        "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C6 / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "External Prochot. Derived from unc_p_prochot_external_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xA",
+        "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+        "MetricExpr": "(UNC_P_PROCHOT_EXTERNAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Thermal Strongest Upper Limit Cycles. Derived from unc_p_freq_max_limit_thermal_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x4",
+        "EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
+        "MetricExpr": "(UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "OS Strongest Upper Limit Cycles. Derived from unc_p_freq_max_os_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x6",
+        "EventName": "UNC_P_FREQ_MAX_OS_CYCLES",
+        "MetricExpr": "(UNC_P_FREQ_MAX_OS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Power Strongest Upper Limit Cycles. Derived from unc_p_freq_max_power_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x5",
+        "EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
+        "MetricExpr": "(UNC_P_FREQ_MAX_POWER_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Cycles spent changing Frequency. Derived from unc_p_freq_trans_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x74",
+        "EventName": "UNC_P_FREQ_TRANS_CYCLES",
+        "MetricExpr": "(UNC_P_FREQ_TRANS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    }
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-cache.json b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-cache.json
new file mode 100644 (file)
index 0000000..076459c
--- /dev/null
@@ -0,0 +1,317 @@
+[
+    {
+        "BriefDescription": "Uncore cache clock ticks. Derived from unc_c_clockticks",
+        "Counter": "0,1,2,3",
+        "EventName": "UNC_C_CLOCKTICKS",
+        "PerPkg": "1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "All LLC Misses (code+ data rd + data wr - including demand and prefetch). Derived from unc_c_llc_lookup.any",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x34",
+        "EventName": "UNC_C_LLC_LOOKUP.ANY",
+        "Filter": "filter_state=0x1",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x11",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "M line evictions from LLC (writebacks to memory). Derived from unc_c_llc_victims.m_state",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x37",
+        "EventName": "UNC_C_LLC_VICTIMS.M_STATE",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "LLC misses - demand and prefetch data reads - excludes LLC prefetches. Derived from unc_c_tor_inserts.miss_opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.DATA_READ",
+        "Filter": "filter_opc=0x182",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "LLC misses - Uncacheable reads (from cpu) . Derived from unc_c_tor_inserts.miss_opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.UNCACHEABLE",
+        "Filter": "filter_opc=0x187",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "MMIO reads. Derived from unc_c_tor_inserts.miss_opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.MMIO_READ",
+        "Filter": "filter_opc=0x187,filter_nc=1",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "MMIO writes. Derived from unc_c_tor_inserts.miss_opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.MMIO_WRITE",
+        "Filter": "filter_opc=0x18f,filter_nc=1",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "LLC prefetch misses for RFO. Derived from unc_c_tor_inserts.miss_opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.RFO_LLC_PREFETCH",
+        "Filter": "filter_opc=0x190",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "LLC prefetch misses for code reads. Derived from unc_c_tor_inserts.miss_opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.CODE_LLC_PREFETCH",
+        "Filter": "filter_opc=0x191",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "LLC prefetch misses for data reads. Derived from unc_c_tor_inserts.miss_opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.DATA_LLC_PREFETCH",
+        "Filter": "filter_opc=0x192",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "LLC misses for PCIe read current. Derived from unc_c_tor_inserts.miss_opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.PCIE_READ",
+        "Filter": "filter_opc=0x19e",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "ItoM write misses (as part of fast string memcpy stores) + PCIe full line writes. Derived from unc_c_tor_inserts.miss_opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.PCIE_WRITE",
+        "Filter": "filter_opc=0x1c8",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "PCIe write misses (full cache line). Derived from unc_c_tor_inserts.miss_opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.PCIE_NON_SNOOP_WRITE",
+        "Filter": "filter_opc=0x1c8,filter_tid=0x3e",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "PCIe writes (partial cache line). Derived from unc_c_tor_inserts.opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.PCIE_NS_PARTIAL_WRITE",
+        "Filter": "filter_opc=0x180,filter_tid=0x3e",
+        "PerPkg": "1",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "L2 demand and L2 prefetch code references to LLC. Derived from unc_c_tor_inserts.opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.CODE_LLC_PREFETCH",
+        "Filter": "filter_opc=0x181",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "Streaming stores (full cache line). Derived from unc_c_tor_inserts.opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.STREAMING_FULL",
+        "Filter": "filter_opc=0x18c",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "Streaming stores (partial cache line). Derived from unc_c_tor_inserts.opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
+        "Filter": "filter_opc=0x18d",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "PCIe read current. Derived from unc_c_tor_inserts.opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.PCIE_READ",
+        "Filter": "filter_opc=0x19e",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "PCIe write references (full cache line). Derived from unc_c_tor_inserts.opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.PCIE_WRITE",
+        "Filter": "filter_opc=0x1c8,filter_tid=0x3e",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "Occupancy counter for LLC data reads (demand and L2 prefetch). Derived from unc_c_tor_occupancy.miss_opcode",
+        "EventCode": "0x36",
+        "EventName": "UNC_C_TOR_OCCUPANCY.LLC_DATA_READ",
+        "Filter": "filter_opc=0x182",
+        "PerPkg": "1",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "read requests to home agent. Derived from unc_h_requests.reads",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1",
+        "EventName": "UNC_H_REQUESTS.READS",
+        "PerPkg": "1",
+        "UMask": "0x3",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "read requests to local home agent. Derived from unc_h_requests.reads_local",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1",
+        "EventName": "UNC_H_REQUESTS.READS_LOCAL",
+        "PerPkg": "1",
+        "UMask": "0x1",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "read requests to remote home agent. Derived from unc_h_requests.reads_remote",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1",
+        "EventName": "UNC_H_REQUESTS.READS_REMOTE",
+        "PerPkg": "1",
+        "UMask": "0x2",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "write requests to home agent. Derived from unc_h_requests.writes",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1",
+        "EventName": "UNC_H_REQUESTS.WRITES",
+        "PerPkg": "1",
+        "UMask": "0xC",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "write requests to local home agent. Derived from unc_h_requests.writes_local",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1",
+        "EventName": "UNC_H_REQUESTS.WRITES_LOCAL",
+        "PerPkg": "1",
+        "UMask": "0x4",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "write requests to remote home agent. Derived from unc_h_requests.writes_remote",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1",
+        "EventName": "UNC_H_REQUESTS.WRITES_REMOTE",
+        "PerPkg": "1",
+        "UMask": "0x8",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "Conflict requests (requests for same address from multiple agents simultaneously). Derived from unc_h_snoop_resp.rspcnflct",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x21",
+        "EventName": "UNC_H_SNOOP_RESP.RSPCNFLCT",
+        "PerPkg": "1",
+        "UMask": "0x40",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "M line forwarded from remote cache along with writeback to memory. Derived from unc_h_snoop_resp.rsp_fwd_wb",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x21",
+        "EventName": "UNC_H_SNOOP_RESP.RSP_FWD_WB",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x20",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "M line forwarded from remote cache with no writeback to memory. Derived from unc_h_snoop_resp.rspifwd",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x21",
+        "EventName": "UNC_H_SNOOP_RESP.RSPIFWD",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x4",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "Shared line response from remote cache. Derived from unc_h_snoop_resp.rsps",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x21",
+        "EventName": "UNC_H_SNOOP_RESP.RSPS",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x2",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "Shared line forwarded from remote cache. Derived from unc_h_snoop_resp.rspsfwd",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x21",
+        "EventName": "UNC_H_SNOOP_RESP.RSPSFWD",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x8",
+        "Unit": "HA"
+    }
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-interconnect.json
new file mode 100644 (file)
index 0000000..39387f7
--- /dev/null
@@ -0,0 +1,28 @@
+[
+    {
+        "BriefDescription": "QPI clock ticks. Derived from unc_q_clockticks",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x14",
+        "EventName": "UNC_Q_CLOCKTICKS",
+        "PerPkg": "1",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Number of data flits transmitted . Derived from unc_q_txl_flits_g0.data",
+        "Counter": "0,1,2,3",
+        "EventName": "UNC_Q_TxL_FLITS_G0.DATA",
+        "PerPkg": "1",
+        "ScaleUnit": "8Bytes",
+        "UMask": "0x2",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Number of non data (control) flits transmitted . Derived from unc_q_txl_flits_g0.non_data",
+        "Counter": "0,1,2,3",
+        "EventName": "UNC_Q_TxL_FLITS_G0.NON_DATA",
+        "PerPkg": "1",
+        "ScaleUnit": "8Bytes",
+        "UMask": "0x4",
+        "Unit": "QPI LL"
+    }
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-memory.json b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-memory.json
new file mode 100644 (file)
index 0000000..d17dc23
--- /dev/null
@@ -0,0 +1,83 @@
+[
+    {
+        "BriefDescription": "read requests to memory controller. Derived from unc_m_cas_count.rd",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x4",
+        "EventName": "UNC_M_CAS_COUNT.RD",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x4",
+        "EventName": "UNC_M_CAS_COUNT.WR",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0xC",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Memory controller clock ticks. Derived from unc_m_clockticks",
+        "Counter": "0,1,2,3",
+        "EventName": "UNC_M_CLOCKTICKS",
+        "PerPkg": "1",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Cycles where DRAM ranks are in power down (CKE) mode. Derived from unc_m_power_channel_ppd",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x85",
+        "EventName": "UNC_M_POWER_CHANNEL_PPD",
+        "MetricExpr": "(UNC_M_POWER_CHANNEL_PPD / UNC_M_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Cycles all ranks are in critical thermal throttle. Derived from unc_m_power_critical_throttle_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x86",
+        "EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES",
+        "MetricExpr": "(UNC_M_POWER_CRITICAL_THROTTLE_CYCLES / UNC_M_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Cycles Memory is in self refresh power mode. Derived from unc_m_power_self_refresh",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x43",
+        "EventName": "UNC_M_POWER_SELF_REFRESH",
+        "MetricExpr": "(UNC_M_POWER_SELF_REFRESH / UNC_M_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Pre-charges due to page misses. Derived from unc_m_pre_count.page_miss",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x2",
+        "EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
+        "PerPkg": "1",
+        "UMask": "0x1",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Pre-charge for reads. Derived from unc_m_pre_count.rd",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x2",
+        "EventName": "UNC_M_PRE_COUNT.RD",
+        "PerPkg": "1",
+        "UMask": "0x4",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Pre-charge for writes. Derived from unc_m_pre_count.wr",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x2",
+        "EventName": "UNC_M_PRE_COUNT.WR",
+        "PerPkg": "1",
+        "UMask": "0x8",
+        "Unit": "iMC"
+    }
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-power.json b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-power.json
new file mode 100644 (file)
index 0000000..b44d430
--- /dev/null
@@ -0,0 +1,84 @@
+[
+    {
+        "BriefDescription": "PCU clock ticks. Use to get percentages of PCU cycles events. Derived from unc_p_clockticks",
+        "Counter": "0,1,2,3",
+        "EventName": "UNC_P_CLOCKTICKS",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "C0 and C1. Derived from unc_p_power_state_occupancy.cores_c0",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x80",
+        "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
+        "Filter": "occ_sel=1",
+        "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C0 / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "C3. Derived from unc_p_power_state_occupancy.cores_c3",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x80",
+        "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
+        "Filter": "occ_sel=2",
+        "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C3 / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "C6 and C7. Derived from unc_p_power_state_occupancy.cores_c6",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x80",
+        "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
+        "Filter": "occ_sel=3",
+        "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C6 / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "External Prochot. Derived from unc_p_prochot_external_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xA",
+        "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+        "MetricExpr": "(UNC_P_PROCHOT_EXTERNAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Thermal Strongest Upper Limit Cycles. Derived from unc_p_freq_max_limit_thermal_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x4",
+        "EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
+        "MetricExpr": "(UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "OS Strongest Upper Limit Cycles. Derived from unc_p_freq_max_os_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x6",
+        "EventName": "UNC_P_FREQ_MAX_OS_CYCLES",
+        "MetricExpr": "(UNC_P_FREQ_MAX_OS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Power Strongest Upper Limit Cycles. Derived from unc_p_freq_max_power_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x5",
+        "EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
+        "MetricExpr": "(UNC_P_FREQ_MAX_POWER_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Cycles spent changing Frequency. Derived from unc_p_freq_trans_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x74",
+        "EventName": "UNC_P_FREQ_TRANS_CYCLES",
+        "MetricExpr": "(UNC_P_FREQ_TRANS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    }
+]
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/uncore-cache.json b/tools/perf/pmu-events/arch/x86/haswellx/uncore-cache.json
new file mode 100644 (file)
index 0000000..076459c
--- /dev/null
@@ -0,0 +1,317 @@
+[
+    {
+        "BriefDescription": "Uncore cache clock ticks. Derived from unc_c_clockticks",
+        "Counter": "0,1,2,3",
+        "EventName": "UNC_C_CLOCKTICKS",
+        "PerPkg": "1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "All LLC Misses (code+ data rd + data wr - including demand and prefetch). Derived from unc_c_llc_lookup.any",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x34",
+        "EventName": "UNC_C_LLC_LOOKUP.ANY",
+        "Filter": "filter_state=0x1",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x11",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "M line evictions from LLC (writebacks to memory). Derived from unc_c_llc_victims.m_state",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x37",
+        "EventName": "UNC_C_LLC_VICTIMS.M_STATE",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "LLC misses - demand and prefetch data reads - excludes LLC prefetches. Derived from unc_c_tor_inserts.miss_opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.DATA_READ",
+        "Filter": "filter_opc=0x182",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "LLC misses - Uncacheable reads (from cpu). Derived from unc_c_tor_inserts.miss_opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.UNCACHEABLE",
+        "Filter": "filter_opc=0x187",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "MMIO reads. Derived from unc_c_tor_inserts.miss_opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.MMIO_READ",
+        "Filter": "filter_opc=0x187,filter_nc=1",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "MMIO writes. Derived from unc_c_tor_inserts.miss_opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.MMIO_WRITE",
+        "Filter": "filter_opc=0x18f,filter_nc=1",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "LLC prefetch misses for RFO. Derived from unc_c_tor_inserts.miss_opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.RFO_LLC_PREFETCH",
+        "Filter": "filter_opc=0x190",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "LLC prefetch misses for code reads. Derived from unc_c_tor_inserts.miss_opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.CODE_LLC_PREFETCH",
+        "Filter": "filter_opc=0x191",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "LLC prefetch misses for data reads. Derived from unc_c_tor_inserts.miss_opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.DATA_LLC_PREFETCH",
+        "Filter": "filter_opc=0x192",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "LLC misses for PCIe read current. Derived from unc_c_tor_inserts.miss_opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.PCIE_READ",
+        "Filter": "filter_opc=0x19e",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "ItoM write misses (as part of fast string memcpy stores) + PCIe full line writes. Derived from unc_c_tor_inserts.miss_opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.PCIE_WRITE",
+        "Filter": "filter_opc=0x1c8",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "PCIe write misses (full cache line). Derived from unc_c_tor_inserts.miss_opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.PCIE_NON_SNOOP_WRITE",
+        "Filter": "filter_opc=0x1c8,filter_tid=0x3e",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "PCIe writes (partial cache line). Derived from unc_c_tor_inserts.opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.PCIE_NS_PARTIAL_WRITE",
+        "Filter": "filter_opc=0x180,filter_tid=0x3e",
+        "PerPkg": "1",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "L2 demand and L2 prefetch code references to LLC. Derived from unc_c_tor_inserts.opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.CODE_LLC_PREFETCH",
+        "Filter": "filter_opc=0x181",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "Streaming stores (full cache line). Derived from unc_c_tor_inserts.opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.STREAMING_FULL",
+        "Filter": "filter_opc=0x18c",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "Streaming stores (partial cache line). Derived from unc_c_tor_inserts.opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
+        "Filter": "filter_opc=0x18d",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "PCIe read current. Derived from unc_c_tor_inserts.opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.PCIE_READ",
+        "Filter": "filter_opc=0x19e",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "PCIe write references (full cache line). Derived from unc_c_tor_inserts.opcode",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.PCIE_WRITE",
+        "Filter": "filter_opc=0x1c8,filter_tid=0x3e",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "Occupancy counter for LLC data reads (demand and L2 prefetch). Derived from unc_c_tor_occupancy.miss_opcode",
+        "EventCode": "0x36",
+        "EventName": "UNC_C_TOR_OCCUPANCY.LLC_DATA_READ",
+        "Filter": "filter_opc=0x182",
+        "PerPkg": "1",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "read requests to home agent. Derived from unc_h_requests.reads",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1",
+        "EventName": "UNC_H_REQUESTS.READS",
+        "PerPkg": "1",
+        "UMask": "0x3",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "read requests to local home agent. Derived from unc_h_requests.reads_local",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1",
+        "EventName": "UNC_H_REQUESTS.READS_LOCAL",
+        "PerPkg": "1",
+        "UMask": "0x1",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "read requests to remote home agent. Derived from unc_h_requests.reads_remote",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1",
+        "EventName": "UNC_H_REQUESTS.READS_REMOTE",
+        "PerPkg": "1",
+        "UMask": "0x2",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "write requests to home agent. Derived from unc_h_requests.writes",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1",
+        "EventName": "UNC_H_REQUESTS.WRITES",
+        "PerPkg": "1",
+        "UMask": "0xC",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "write requests to local home agent. Derived from unc_h_requests.writes_local",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1",
+        "EventName": "UNC_H_REQUESTS.WRITES_LOCAL",
+        "PerPkg": "1",
+        "UMask": "0x4",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "write requests to remote home agent. Derived from unc_h_requests.writes_remote",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1",
+        "EventName": "UNC_H_REQUESTS.WRITES_REMOTE",
+        "PerPkg": "1",
+        "UMask": "0x8",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "Conflict requests (requests for same address from multiple agents simultaneously). Derived from unc_h_snoop_resp.rspcnflct",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x21",
+        "EventName": "UNC_H_SNOOP_RESP.RSPCNFLCT",
+        "PerPkg": "1",
+        "UMask": "0x40",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "M line forwarded from remote cache along with writeback to memory. Derived from unc_h_snoop_resp.rsp_fwd_wb",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x21",
+        "EventName": "UNC_H_SNOOP_RESP.RSP_FWD_WB",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x20",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "M line forwarded from remote cache with no writeback to memory. Derived from unc_h_snoop_resp.rspifwd",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x21",
+        "EventName": "UNC_H_SNOOP_RESP.RSPIFWD",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x4",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "Shared line response from remote cache. Derived from unc_h_snoop_resp.rsps",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x21",
+        "EventName": "UNC_H_SNOOP_RESP.RSPS",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x2",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "Shared line forwarded from remote cache. Derived from unc_h_snoop_resp.rspsfwd",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x21",
+        "EventName": "UNC_H_SNOOP_RESP.RSPSFWD",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x8",
+        "Unit": "HA"
+    }
+]
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/haswellx/uncore-interconnect.json
new file mode 100644 (file)
index 0000000..39387f7
--- /dev/null
@@ -0,0 +1,28 @@
+[
+    {
+        "BriefDescription": "QPI clock ticks. Derived from unc_q_clockticks",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x14",
+        "EventName": "UNC_Q_CLOCKTICKS",
+        "PerPkg": "1",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Number of data flits transmitted. Derived from unc_q_txl_flits_g0.data",
+        "Counter": "0,1,2,3",
+        "EventName": "UNC_Q_TxL_FLITS_G0.DATA",
+        "PerPkg": "1",
+        "ScaleUnit": "8Bytes",
+        "UMask": "0x2",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Number of non data (control) flits transmitted. Derived from unc_q_txl_flits_g0.non_data",
+        "Counter": "0,1,2,3",
+        "EventName": "UNC_Q_TxL_FLITS_G0.NON_DATA",
+        "PerPkg": "1",
+        "ScaleUnit": "8Bytes",
+        "UMask": "0x4",
+        "Unit": "QPI LL"
+    }
+]
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/uncore-memory.json b/tools/perf/pmu-events/arch/x86/haswellx/uncore-memory.json
new file mode 100644 (file)
index 0000000..d17dc23
--- /dev/null
@@ -0,0 +1,83 @@
+[
+    {
+        "BriefDescription": "read requests to memory controller. Derived from unc_m_cas_count.rd",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x4",
+        "EventName": "UNC_M_CAS_COUNT.RD",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x4",
+        "EventName": "UNC_M_CAS_COUNT.WR",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0xC",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Memory controller clock ticks. Derived from unc_m_clockticks",
+        "Counter": "0,1,2,3",
+        "EventName": "UNC_M_CLOCKTICKS",
+        "PerPkg": "1",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Cycles where DRAM ranks are in power down (CKE) mode. Derived from unc_m_power_channel_ppd",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x85",
+        "EventName": "UNC_M_POWER_CHANNEL_PPD",
+        "MetricExpr": "(UNC_M_POWER_CHANNEL_PPD / UNC_M_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Cycles all ranks are in critical thermal throttle. Derived from unc_m_power_critical_throttle_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x86",
+        "EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES",
+        "MetricExpr": "(UNC_M_POWER_CRITICAL_THROTTLE_CYCLES / UNC_M_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Cycles Memory is in self refresh power mode. Derived from unc_m_power_self_refresh",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x43",
+        "EventName": "UNC_M_POWER_SELF_REFRESH",
+        "MetricExpr": "(UNC_M_POWER_SELF_REFRESH / UNC_M_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Pre-charges due to page misses. Derived from unc_m_pre_count.page_miss",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x2",
+        "EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
+        "PerPkg": "1",
+        "UMask": "0x1",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Pre-charge for reads. Derived from unc_m_pre_count.rd",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x2",
+        "EventName": "UNC_M_PRE_COUNT.RD",
+        "PerPkg": "1",
+        "UMask": "0x4",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Pre-charge for writes. Derived from unc_m_pre_count.wr",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x2",
+        "EventName": "UNC_M_PRE_COUNT.WR",
+        "PerPkg": "1",
+        "UMask": "0x8",
+        "Unit": "iMC"
+    }
+]
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/uncore-power.json b/tools/perf/pmu-events/arch/x86/haswellx/uncore-power.json
new file mode 100644 (file)
index 0000000..b44d430
--- /dev/null
@@ -0,0 +1,84 @@
+[
+    {
+        "BriefDescription": "PCU clock ticks. Use to get percentages of PCU cycles events. Derived from unc_p_clockticks",
+        "Counter": "0,1,2,3",
+        "EventName": "UNC_P_CLOCKTICKS",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "C0 and C1. Derived from unc_p_power_state_occupancy.cores_c0",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x80",
+        "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
+        "Filter": "occ_sel=1",
+        "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C0 / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "C3. Derived from unc_p_power_state_occupancy.cores_c3",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x80",
+        "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
+        "Filter": "occ_sel=2",
+        "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C3 / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "C6 and C7. Derived from unc_p_power_state_occupancy.cores_c6",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x80",
+        "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
+        "Filter": "occ_sel=3",
+        "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C6 / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "External Prochot. Derived from unc_p_prochot_external_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xA",
+        "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+        "MetricExpr": "(UNC_P_PROCHOT_EXTERNAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Thermal Strongest Upper Limit Cycles. Derived from unc_p_freq_max_limit_thermal_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x4",
+        "EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
+        "MetricExpr": "(UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "OS Strongest Upper Limit Cycles. Derived from unc_p_freq_max_os_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x6",
+        "EventName": "UNC_P_FREQ_MAX_OS_CYCLES",
+        "MetricExpr": "(UNC_P_FREQ_MAX_OS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Power Strongest Upper Limit Cycles. Derived from unc_p_freq_max_power_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x5",
+        "EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
+        "MetricExpr": "(UNC_P_FREQ_MAX_POWER_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Cycles spent changing Frequency. Derived from unc_p_freq_trans_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x74",
+        "EventName": "UNC_P_FREQ_TRANS_CYCLES",
+        "MetricExpr": "(UNC_P_FREQ_TRANS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    }
+]
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/uncore-cache.json b/tools/perf/pmu-events/arch/x86/ivytown/uncore-cache.json
new file mode 100644 (file)
index 0000000..2efdc67
--- /dev/null
@@ -0,0 +1,322 @@
+[
+    {
+        "BriefDescription": "Uncore cache clock ticks. Derived from unc_c_clockticks",
+        "Counter": "0,1,2,3",
+        "EventName": "UNC_C_CLOCKTICKS",
+        "PerPkg": "1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "All LLC Misses (code+ data rd + data wr - including demand and prefetch). Derived from unc_c_llc_lookup.any",
+        "Counter": "0,1",
+        "EventCode": "0x34",
+        "EventName": "UNC_C_LLC_LOOKUP.ANY",
+        "Filter": "filter_state=0x1",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x11",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "M line evictions from LLC (writebacks to memory). Derived from unc_c_llc_victims.m_state",
+        "Counter": "0,1",
+        "EventCode": "0x37",
+        "EventName": "UNC_C_LLC_VICTIMS.M_STATE",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "LLC misses - demand and prefetch data reads - excludes LLC prefetches. Derived from unc_c_tor_inserts.miss_opcode.demand",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.DATA_READ",
+        "Filter": "filter_opc=0x182",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "LLC misses - Uncacheable reads. Derived from unc_c_tor_inserts.miss_opcode.uncacheable",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.UNCACHEABLE",
+        "Filter": "filter_opc=0x187",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "LLC prefetch misses for RFO. Derived from unc_c_tor_inserts.miss_opcode.rfo_prefetch",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.RFO_LLC_PREFETCH",
+        "Filter": "filter_opc=0x190",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "LLC prefetch misses for code reads. Derived from unc_c_tor_inserts.miss_opcode.code",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.CODE_LLC_PREFETCH",
+        "Filter": "filter_opc=0x191",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "LLC prefetch misses for data reads. Derived from unc_c_tor_inserts.miss_opcode.data_read",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.DATA_LLC_PREFETCH",
+        "Filter": "filter_opc=0x192",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "PCIe allocating writes that miss LLC - DDIO misses. Derived from unc_c_tor_inserts.miss_opcode.ddio_miss",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.PCIE_WRITE",
+        "Filter": "filter_opc=0x19c",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "LLC misses for PCIe read current. Derived from unc_c_tor_inserts.miss_opcode.pcie_read",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.PCIE_READ",
+        "Filter": "filter_opc=0x19e",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "LLC misses for ItoM writes (as part of fast string memcpy stores). Derived from unc_c_tor_inserts.miss_opcode.itom_write",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.ITOM_WRITE",
+        "Filter": "filter_opc=0x1c8",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "LLC misses for PCIe non-snoop reads. Derived from unc_c_tor_inserts.miss_opcode.pcie_read",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.PCIE_NON_SNOOP_READ",
+        "Filter": "filter_opc=0x1e4",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "LLC misses for PCIe non-snoop writes (full line). Derived from unc_c_tor_inserts.miss_opcode.pcie_write",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.PCIE_NON_SNOOP_WRITE",
+        "Filter": "filter_opc=0x1e6",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "Streaming stores (full cache line). Derived from unc_c_tor_inserts.opcode.streaming_full",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.STREAMING_FULL",
+        "Filter": "filter_opc=0x18c",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "Streaming stores (partial cache line). Derived from unc_c_tor_inserts.opcode.streaming_partial",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
+        "Filter": "filter_opc=0x18d",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "Partial PCIe reads. Derived from unc_c_tor_inserts.opcode.pcie_partial",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.PCIE_PARTIAL_READ",
+        "Filter": "filter_opc=0x195",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "PCIe allocating writes that hit in LLC (DDIO hits). Derived from unc_c_tor_inserts.opcode.ddio_hit",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.PCIE_WRITE",
+        "Filter": "filter_opc=0x19c",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "PCIe read current. Derived from unc_c_tor_inserts.opcode.pcie_read_current",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.PCIE_READ",
+        "Filter": "filter_opc=0x19e",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "ItoM write hits (as part of fast string memcpy stores). Derived from unc_c_tor_inserts.opcode.itom_write_hit",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.ITOM_WRITE",
+        "Filter": "filter_opc=0x1c8",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "PCIe non-snoop reads. Derived from unc_c_tor_inserts.opcode.pcie_read",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.PCIE_NS_READ",
+        "Filter": "filter_opc=0x1e4",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "PCIe non-snoop writes (partial). Derived from unc_c_tor_inserts.opcode.pcie_partial_write",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.PCIE_NS_PARTIAL_WRITE",
+        "Filter": "filter_opc=0x1e5",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "PCIe non-snoop writes (full line). Derived from unc_c_tor_inserts.opcode.pcie_full_write",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.PCIE_NS_WRITE",
+        "Filter": "filter_opc=0x1e6",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "Occupancy for all LLC misses that are addressed to local memory. Derived from unc_c_tor_occupancy.miss_local",
+        "EventCode": "0x36",
+        "EventName": "UNC_C_TOR_OCCUPANCY.MISS_LOCAL",
+        "PerPkg": "1",
+        "UMask": "0x2A",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "Occupancy counter for LLC data reads (demand and L2 prefetch). Derived from unc_c_tor_occupancy.miss_opcode.llc_data_read",
+        "EventCode": "0x36",
+        "EventName": "UNC_C_TOR_OCCUPANCY.LLC_DATA_READ",
+        "Filter": "filter_opc=0x182",
+        "PerPkg": "1",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "Occupancy for all LLC misses that are addressed to remote memory. Derived from unc_c_tor_occupancy.miss_remote",
+        "EventCode": "0x36",
+        "EventName": "UNC_C_TOR_OCCUPANCY.MISS_REMOTE",
+        "PerPkg": "1",
+        "UMask": "0x8A",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "Read requests to home agent. Derived from unc_h_requests.reads",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1",
+        "EventName": "UNC_H_REQUESTS.READS",
+        "PerPkg": "1",
+        "UMask": "0x3",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "Write requests to home agent. Derived from unc_h_requests.writes",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1",
+        "EventName": "UNC_H_REQUESTS.WRITES",
+        "PerPkg": "1",
+        "UMask": "0xC",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "M line forwarded from remote cache along with writeback to memory. Derived from unc_h_snoop_resp.rsp_fwd_wb",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x21",
+        "EventName": "UNC_H_SNOOP_RESP.RSP_FWD_WB",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x20",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "M line forwarded from remote cache with no writeback to memory. Derived from unc_h_snoop_resp.rspifwd",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x21",
+        "EventName": "UNC_H_SNOOP_RESP.RSPIFWD",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x4",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "Shared line response from remote cache. Derived from unc_h_snoop_resp.rsps",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x21",
+        "EventName": "UNC_H_SNOOP_RESP.RSPS",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x2",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "Shared line forwarded from remote cache. Derived from unc_h_snoop_resp.rspsfwd",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x21",
+        "EventName": "UNC_H_SNOOP_RESP.RSPSFWD",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x8",
+        "Unit": "HA"
+    }
+]
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/ivytown/uncore-interconnect.json
new file mode 100644 (file)
index 0000000..d7e2fda
--- /dev/null
@@ -0,0 +1,46 @@
+[
+    {
+        "BriefDescription": "QPI clock ticks. Use to get percentages for QPI cycles events. Derived from unc_q_clockticks",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x14",
+        "EventName": "UNC_Q_CLOCKTICKS",
+        "PerPkg": "1",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Cycles where receiving QPI link is in half-width mode. Derived from unc_q_rxl0p_power_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x10",
+        "EventName": "UNC_Q_RxL0P_POWER_CYCLES",
+        "MetricExpr": "(UNC_Q_RxL0P_POWER_CYCLES / UNC_Q_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Cycles where transmitting QPI link is in half-width mode. Derived from unc_q_txl0p_power_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xd",
+        "EventName": "UNC_Q_TxL0P_POWER_CYCLES",
+        "MetricExpr": "(UNC_Q_TxL0P_POWER_CYCLES / UNC_Q_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Number of data flits transmitted. Derived from unc_q_txl_flits_g0.data",
+        "Counter": "0,1,2,3",
+        "EventName": "UNC_Q_TxL_FLITS_G0.DATA",
+        "PerPkg": "1",
+        "ScaleUnit": "8Bytes",
+        "UMask": "0x2",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Number of non data (control) flits transmitted. Derived from unc_q_txl_flits_g0.non_data",
+        "Counter": "0,1,2,3",
+        "EventName": "UNC_Q_TxL_FLITS_G0.NON_DATA",
+        "PerPkg": "1",
+        "ScaleUnit": "8Bytes",
+        "UMask": "0x4",
+        "Unit": "QPI LL"
+    }
+]
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/uncore-memory.json b/tools/perf/pmu-events/arch/x86/ivytown/uncore-memory.json
new file mode 100644 (file)
index 0000000..ac4ad4d
--- /dev/null
@@ -0,0 +1,74 @@
+[
+    {
+        "BriefDescription": "Memory page activates for reads and writes. Derived from unc_m_act_count.rd",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1",
+        "EventName": "UNC_M_ACT_COUNT.RD",
+        "PerPkg": "1",
+        "UMask": "0x1",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Read requests to memory controller. Derived from unc_m_cas_count.rd",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x4",
+        "EventName": "UNC_M_CAS_COUNT.RD",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Write requests to memory controller. Derived from unc_m_cas_count.wr",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x4",
+        "EventName": "UNC_M_CAS_COUNT.WR",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0xC",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Memory controller clock ticks. Use to generate percentages for memory controller CYCLES events. Derived from unc_m_clockticks",
+        "Counter": "0,1,2,3",
+        "EventName": "UNC_M_CLOCKTICKS",
+        "PerPkg": "1",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Cycles where DRAM ranks are in power down (CKE) mode. Derived from unc_m_power_channel_ppd",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x85",
+        "EventName": "UNC_M_POWER_CHANNEL_PPD",
+        "MetricExpr": "(UNC_M_POWER_CHANNEL_PPD / UNC_M_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Cycles all ranks are in critical thermal throttle. Derived from unc_m_power_critical_throttle_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x86",
+        "EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES",
+        "MetricExpr": "(UNC_M_POWER_CRITICAL_THROTTLE_CYCLES / UNC_M_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Cycles Memory is in self refresh power mode. Derived from unc_m_power_self_refresh",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x43",
+        "EventName": "UNC_M_POWER_SELF_REFRESH",
+        "MetricExpr": "(UNC_M_POWER_SELF_REFRESH / UNC_M_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Memory page conflicts. Derived from unc_m_pre_count.page_miss",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x2",
+        "EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
+        "PerPkg": "1",
+        "UMask": "0x1",
+        "Unit": "iMC"
+    }
+]
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json b/tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json
new file mode 100644 (file)
index 0000000..dc2586d
--- /dev/null
@@ -0,0 +1,249 @@
+[
+    {
+        "BriefDescription": "PCU clock ticks. Use to get percentages of PCU cycles events. Derived from unc_p_clockticks",
+        "Counter": "0,1,2,3",
+        "EventName": "UNC_P_CLOCKTICKS",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter.  (filter_band0=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band0_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xb",
+        "EventName": "UNC_P_FREQ_BAND0_CYCLES",
+        "MetricExpr": "(UNC_P_FREQ_BAND0_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter.  (filter_band1=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band1_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xc",
+        "EventName": "UNC_P_FREQ_BAND1_CYCLES",
+        "MetricExpr": "(UNC_P_FREQ_BAND1_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter.  (filter_band2=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band2_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xd",
+        "EventName": "UNC_P_FREQ_BAND2_CYCLES",
+        "MetricExpr": "(UNC_P_FREQ_BAND2_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter.  (filter_band3=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band3_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xe",
+        "EventName": "UNC_P_FREQ_BAND3_CYCLES",
+        "MetricExpr": "(UNC_P_FREQ_BAND3_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to the frequency that is configured in the filter.  (filter_band0=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band0_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xb",
+        "EventName": "UNC_P_FREQ_BAND0_TRANSITIONS",
+        "Filter": "edge=1",
+        "MetricExpr": "(UNC_P_FREQ_BAND0_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to the frequency that is configured in the filter.  (filter_band1=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band1_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xc",
+        "EventName": "UNC_P_FREQ_BAND1_TRANSITIONS",
+        "Filter": "edge=1",
+        "MetricExpr": "(UNC_P_FREQ_BAND1_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to the frequency that is configured in the filter.  (filter_band2=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band2_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xd",
+        "EventName": "UNC_P_FREQ_BAND2_TRANSITIONS",
+        "Filter": "edge=1",
+        "MetricExpr": "(UNC_P_FREQ_BAND2_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to the frequency that is configured in the filter.  (filter_band3=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band3_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xe",
+        "EventName": "UNC_P_FREQ_BAND3_TRANSITIONS",
+        "Filter": "edge=1",
+        "MetricExpr": "(UNC_P_FREQ_BAND3_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State.  It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details. Derived from unc_p_power_state_occupancy.cores_c0",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x80",
+        "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
+        "Filter": "occ_sel=1",
+        "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C0 / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State.  It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details. Derived from unc_p_power_state_occupancy.cores_c3",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x80",
+        "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
+        "Filter": "occ_sel=2",
+        "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C3 / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State.  It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details. Derived from unc_p_power_state_occupancy.cores_c6",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x80",
+        "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
+        "Filter": "occ_sel=3",
+        "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C6 / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles that we are in external PROCHOT mode.  This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip. Derived from unc_p_prochot_external_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xa",
+        "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+        "MetricExpr": "(UNC_P_PROCHOT_EXTERNAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles when thermal conditions are the upper limit on frequency.  This is related to the THERMAL_THROTTLE CYCLES_ABOVE_TEMP event, which always counts cycles when we are above the thermal temperature.  This event (STRONGEST_UPPER_LIMIT) is sampled at the output of the algorithm that determines the actual frequency, while THERMAL_THROTTLE looks at the input. Derived from unc_p_freq_max_limit_thermal_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x4",
+        "EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
+        "MetricExpr": "(UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles when the OS is the upper limit on frequency. Derived from unc_p_freq_max_os_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x6",
+        "EventName": "UNC_P_FREQ_MAX_OS_CYCLES",
+        "MetricExpr": "(UNC_P_FREQ_MAX_OS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles when power is the upper limit on frequency. Derived from unc_p_freq_max_power_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x5",
+        "EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
+        "MetricExpr": "(UNC_P_FREQ_MAX_POWER_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles when current is the upper limit on frequency. Derived from unc_p_freq_max_current_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x7",
+        "EventName": "UNC_P_FREQ_MAX_CURRENT_CYCLES",
+        "MetricExpr": "(UNC_P_FREQ_MAX_CURRENT_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles when the system is changing frequency.  This can not be filtered by thread ID.  One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system. Derived from unc_p_freq_trans_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x60",
+        "EventName": "UNC_P_FREQ_TRANS_CYCLES",
+        "MetricExpr": "(UNC_P_FREQ_TRANS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 1.2Ghz. Derived from unc_p_freq_band0_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xb",
+        "EventName": "UNC_P_FREQ_GE_1200MHZ_CYCLES",
+        "Filter": "filter_band0=1200",
+        "MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 2Ghz. Derived from unc_p_freq_band1_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xc",
+        "EventName": "UNC_P_FREQ_GE_2000MHZ_CYCLES",
+        "Filter": "filter_band1=2000",
+        "MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 3Ghz. Derived from unc_p_freq_band2_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xd",
+        "EventName": "UNC_P_FREQ_GE_3000MHZ_CYCLES",
+        "Filter": "filter_band2=3000",
+        "MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 4Ghz. Derived from unc_p_freq_band3_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xe",
+        "EventName": "UNC_P_FREQ_GE_4000MHZ_CYCLES",
+        "Filter": "filter_band3=4000",
+        "MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to 1.2Ghz. Derived from unc_p_freq_band0_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xb",
+        "EventName": "UNC_P_FREQ_GE_1200MHZ_TRANSITIONS",
+        "Filter": "edge=1,filter_band0=1200",
+        "MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to 2Ghz. Derived from unc_p_freq_band1_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xc",
+        "EventName": "UNC_P_FREQ_GE_2000MHZ_TRANSITIONS",
+        "Filter": "edge=1,filter_band1=2000",
+        "MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to 3Ghz. Derived from unc_p_freq_band2_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xd",
+        "EventName": "UNC_P_FREQ_GE_3000MHZ_TRANSITIONS",
+        "Filter": "edge=1,filter_band2=3000",
+        "MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to 4Ghz. Derived from unc_p_freq_band3_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xe",
+        "EventName": "UNC_P_FREQ_GE_4000MHZ_TRANSITIONS",
+        "Filter": "edge=1,filter_band3=4000",
+        "MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    }
+]
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/uncore-cache.json b/tools/perf/pmu-events/arch/x86/jaketown/uncore-cache.json
new file mode 100644 (file)
index 0000000..2f23cf0
--- /dev/null
@@ -0,0 +1,209 @@
+[
+    {
+        "BriefDescription": "Uncore cache clock ticks. Derived from unc_c_clockticks",
+        "Counter": "0,1,2,3",
+        "EventName": "UNC_C_CLOCKTICKS",
+        "PerPkg": "1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "All LLC Misses (code+ data rd + data wr - including demand and prefetch). Derived from unc_c_llc_lookup.any",
+        "Counter": "0,1",
+        "EventCode": "0x34",
+        "EventName": "UNC_C_LLC_LOOKUP.ANY",
+        "Filter": "filter_state=0x1",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x11",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "M line evictions from LLC (writebacks to memory). Derived from unc_c_llc_victims.m_state",
+        "Counter": "0,1",
+        "EventCode": "0x37",
+        "EventName": "UNC_C_LLC_VICTIMS.M_STATE",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "LLC misses - demand and prefetch data reads - excludes LLC prefetches. Derived from unc_c_tor_inserts.miss_opcode.demand",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.DATA_READ",
+        "Filter": "filter_opc=0x182",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "LLC misses - Uncacheable reads. Derived from unc_c_tor_inserts.miss_opcode.uncacheable",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.UNCACHEABLE",
+        "Filter": "filter_opc=0x187",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "PCIe allocating writes that miss LLC - DDIO misses. Derived from unc_c_tor_inserts.miss_opcode.ddio_miss",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.PCIE_WRITE",
+        "Filter": "filter_opc=0x19c",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "LLC misses for ItoM writes (as part of fast string memcpy stores). Derived from unc_c_tor_inserts.miss_opcode.itom_write",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_MISSES.ITOM_WRITE",
+        "Filter": "filter_opc=0x1c8",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "Streaming stores (full cache line). Derived from unc_c_tor_inserts.opcode.streaming_full",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.STREAMING_FULL",
+        "Filter": "filter_opc=0x18c",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "Streaming stores (partial cache line). Derived from unc_c_tor_inserts.opcode.streaming_partial",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
+        "Filter": "filter_opc=0x18d",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "Partial PCIe reads. Derived from unc_c_tor_inserts.opcode.pcie_partial",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.PCIE_PARTIAL_READ",
+        "Filter": "filter_opc=0x195",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "PCIe allocating writes that hit in LLC (DDIO hits). Derived from unc_c_tor_inserts.opcode.ddio_hit",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.PCIE_WRITE",
+        "Filter": "filter_opc=0x19c",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "PCIe read current. Derived from unc_c_tor_inserts.opcode.pcie_read_current",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.PCIE_READ",
+        "Filter": "filter_opc=0x19e",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "ItoM write hits (as part of fast string memcpy stores). Derived from unc_c_tor_inserts.opcode.itom_write_hit",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.ITOM_WRITE",
+        "Filter": "filter_opc=0x1c8",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "PCIe non-snoop reads. Derived from unc_c_tor_inserts.opcode.pcie_read",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.PCIE_NS_READ",
+        "Filter": "filter_opc=0x1e4",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "PCIe non-snoop writes (partial). Derived from unc_c_tor_inserts.opcode.pcie_partial_write",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.PCIE_NS_PARTIAL_WRITE",
+        "Filter": "filter_opc=0x1e5",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "PCIe non-snoop writes (full line). Derived from unc_c_tor_inserts.opcode.pcie_full_write",
+        "Counter": "0,1",
+        "EventCode": "0x35",
+        "EventName": "LLC_REFERENCES.PCIE_NS_WRITE",
+        "Filter": "filter_opc=0x1e6",
+        "PerPkg": "1",
+        "ScaleUnit": "64Bytes",
+        "UMask": "0x1",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "Occupancy counter for all LLC misses; we divide this by UNC_C_CLOCKTICKS to get average Q depth. Derived from unc_c_tor_occupancy.miss_all",
+        "EventCode": "0x36",
+        "EventName": "UNC_C_TOR_OCCUPANCY.MISS_ALL",
+        "Filter": "filter_opc=0x182",
+        "MetricExpr": "(UNC_C_TOR_OCCUPANCY.MISS_ALL / UNC_C_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "UMask": "0xa",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "Occupancy counter for LLC data reads (demand and L2 prefetch). Derived from unc_c_tor_occupancy.miss_opcode.llc_data_read",
+        "EventCode": "0x36",
+        "EventName": "UNC_C_TOR_OCCUPANCY.LLC_DATA_READ",
+        "PerPkg": "1",
+        "UMask": "0x3",
+        "Unit": "CBO"
+    },
+    {
+        "BriefDescription": "read requests to home agent. Derived from unc_h_requests.reads",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1",
+        "EventName": "UNC_H_REQUESTS.READS",
+        "PerPkg": "1",
+        "UMask": "0x3",
+        "Unit": "HA"
+    },
+    {
+        "BriefDescription": "write requests to home agent. Derived from unc_h_requests.writes",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1",
+        "EventName": "UNC_H_REQUESTS.WRITES",
+        "PerPkg": "1",
+        "UMask": "0xc",
+        "Unit": "HA"
+    }
+]
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/jaketown/uncore-interconnect.json
new file mode 100644 (file)
index 0000000..6335187
--- /dev/null
@@ -0,0 +1,46 @@
+[
+    {
+        "BriefDescription": "QPI clock ticks. Used to get percentages of QPI cycles events. Derived from unc_q_clockticks",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x14",
+        "EventName": "UNC_Q_CLOCKTICKS",
+        "PerPkg": "1",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Cycles where receiving QPI link is in half-width mode. Derived from unc_q_rxl0p_power_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x10",
+        "EventName": "UNC_Q_RxL0P_POWER_CYCLES",
+        "MetricExpr": "(UNC_Q_RxL0P_POWER_CYCLES / UNC_Q_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Cycles where transmitting QPI link is in half-width mode. Derived from unc_q_txl0p_power_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xd",
+        "EventName": "UNC_Q_TxL0P_POWER_CYCLES",
+        "MetricExpr": "(UNC_Q_TxL0P_POWER_CYCLES / UNC_Q_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Number of data flits transmitted. Derived from unc_q_txl_flits_g0.data",
+        "Counter": "0,1,2,3",
+        "EventName": "UNC_Q_TxL_FLITS_G0.DATA",
+        "PerPkg": "1",
+        "ScaleUnit": "8Bytes",
+        "UMask": "0x2",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Number of non data (control) flits transmitted. Derived from unc_q_txl_flits_g0.non_data",
+        "Counter": "0,1,2,3",
+        "EventName": "UNC_Q_TxL_FLITS_G0.NON_DATA",
+        "PerPkg": "1",
+        "ScaleUnit": "8Bytes",
+        "UMask": "0x4",
+        "Unit": "QPI LL"
+    }
+]
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/uncore-memory.json b/tools/perf/pmu-events/arch/x86/jaketown/uncore-memory.json
new file mode 100644 (file)
index 0000000..e2cf6da
--- /dev/null
@@ -0,0 +1,79 @@
+[
+    {
+        "BriefDescription": "Memory page activates. Derived from unc_m_act_count",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1",
+        "EventName": "UNC_M_ACT_COUNT",
+        "PerPkg": "1",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "read requests to memory controller. Derived from unc_m_cas_count.rd",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x4",
+        "EventName": "UNC_M_CAS_COUNT.RD",
+        "PerPkg": "1",
+        "UMask": "0x3",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x4",
+        "EventName": "UNC_M_CAS_COUNT.WR",
+        "PerPkg": "1",
+        "UMask": "0xc",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Memory controller clock ticks. Used to get percentages of memory controller cycles events. Derived from unc_m_clockticks",
+        "Counter": "0,1,2,3",
+        "EventName": "UNC_M_CLOCKTICKS",
+        "PerPkg": "1",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Cycles where DRAM ranks are in power down (CKE) mode. Derived from unc_m_power_channel_ppd",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x85",
+        "EventName": "UNC_M_POWER_CHANNEL_PPD",
+        "MetricExpr": "(UNC_M_POWER_CHANNEL_PPD / UNC_M_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Cycles all ranks are in critical thermal throttle. Derived from unc_m_power_critical_throttle_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x86",
+        "EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES",
+        "MetricExpr": "(UNC_M_POWER_CRITICAL_THROTTLE_CYCLES / UNC_M_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Cycles Memory is in self refresh power mode. Derived from unc_m_power_self_refresh",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x43",
+        "EventName": "UNC_M_POWER_SELF_REFRESH",
+        "MetricExpr": "(UNC_M_POWER_SELF_REFRESH / UNC_M_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Memory page conflicts. Derived from unc_m_pre_count.page_miss",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x2",
+        "EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
+        "PerPkg": "1",
+        "UMask": "0x1",
+        "Unit": "iMC"
+    },
+    {
+        "BriefDescription": "Occupancy counter for memory read queue. Derived from unc_m_rpq_occupancy",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x80",
+        "EventName": "UNC_M_RPQ_OCCUPANCY",
+        "PerPkg": "1",
+        "Unit": "iMC"
+    }
+]
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json b/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json
new file mode 100644 (file)
index 0000000..bbe36d5
--- /dev/null
@@ -0,0 +1,248 @@
+[
+    {
+        "BriefDescription": "PCU clock ticks. Use to get percentages of PCU cycles events. Derived from unc_p_clockticks",
+        "Counter": "0,1,2,3",
+        "EventName": "UNC_P_CLOCKTICKS",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter.  (filter_band0=XXX with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band0_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xb",
+        "EventName": "UNC_P_FREQ_BAND0_CYCLES",
+        "MetricExpr": "(UNC_P_FREQ_BAND0_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter.  (filter_band1=XXX with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band1_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xc",
+        "EventName": "UNC_P_FREQ_BAND1_CYCLES",
+        "MetricExpr": "(UNC_P_FREQ_BAND1_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter.  (filter_band2=XXX with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band2_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xd",
+        "EventName": "UNC_P_FREQ_BAND2_CYCLES",
+        "MetricExpr": "(UNC_P_FREQ_BAND2_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter.  (filter_band3=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band3_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xe",
+        "EventName": "UNC_P_FREQ_BAND3_CYCLES",
+        "MetricExpr": "(UNC_P_FREQ_BAND3_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to the frequency that is configured in the filter.  (filter_band0=XXX with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band0_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xb",
+        "EventName": "UNC_P_FREQ_BAND0_TRANSITIONS",
+        "Filter": "edge=1",
+        "MetricExpr": "(UNC_P_FREQ_BAND0_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to the frequency that is configured in the filter.  (filter_band1=XXX with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band1_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xc",
+        "EventName": "UNC_P_FREQ_BAND1_TRANSITIONS",
+        "Filter": "edge=1",
+        "MetricExpr": "(UNC_P_FREQ_BAND1_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles that the uncore transitioned to a frequency greater than or equal to the frequency that is configured in the filter.  (filter_band2=XXX with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band2_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xd",
+        "EventName": "UNC_P_FREQ_BAND2_TRANSITIONS",
+        "Filter": "edge=1",
+        "MetricExpr": "(UNC_P_FREQ_BAND2_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles that the uncore transitioned to a frequency greater than or equal to the frequency that is configured in the filter.  (filter_band3=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band3_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xe",
+        "EventName": "UNC_P_FREQ_BAND3_TRANSITIONS",
+        "Filter": "edge=1",
+        "MetricExpr": "(UNC_P_FREQ_BAND3_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C0.  It can be used by itself to get the average number of cores in C0, with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details. Derived from unc_p_power_state_occupancy.cores_c0",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x80",
+        "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
+        "Filter": "occ_sel=1",
+        "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C0 / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C3.  It can be used by itself to get the average number of cores in C0, with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details. Derived from unc_p_power_state_occupancy.cores_c3",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x80",
+        "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
+        "Filter": "occ_sel=2",
+        "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C3 / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C6.  It can be used by itself to get the average number of cores in C0, with thresholding to generate histograms, or with other PCU events. Derived from unc_p_power_state_occupancy.cores_c6",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x80",
+        "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
+        "Filter": "occ_sel=3",
+        "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C6 / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles that we are in external PROCHOT mode.  This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip. Derived from unc_p_prochot_external_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xa",
+        "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+        "MetricExpr": "(UNC_P_PROCHOT_EXTERNAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles when temperature is the upper limit on frequency. Derived from unc_p_freq_max_limit_thermal_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x4",
+        "EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
+        "MetricExpr": "(UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles when the OS is the upper limit on frequency. Derived from unc_p_freq_max_os_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x6",
+        "EventName": "UNC_P_FREQ_MAX_OS_CYCLES",
+        "MetricExpr": "(UNC_P_FREQ_MAX_OS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles when power is the upper limit on frequency. Derived from unc_p_freq_max_power_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x5",
+        "EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
+        "MetricExpr": "(UNC_P_FREQ_MAX_POWER_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles when current is the upper limit on frequency. Derived from unc_p_freq_max_current_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x7",
+        "EventName": "UNC_P_FREQ_MAX_CURRENT_CYCLES",
+        "MetricExpr": "(UNC_P_FREQ_MAX_CURRENT_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Cycles spent changing Frequency. Derived from unc_p_freq_trans_cycles",
+        "Counter": "0,1,2,3",
+        "EventName": "UNC_P_FREQ_TRANS_CYCLES",
+        "MetricExpr": "(UNC_P_FREQ_TRANS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 1.2Ghz. Derived from unc_p_freq_band0_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xb",
+        "EventName": "UNC_P_FREQ_GE_1200MHZ_CYCLES",
+        "Filter": "filter_band0=1200",
+        "MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 2Ghz. Derived from unc_p_freq_band1_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xc",
+        "EventName": "UNC_P_FREQ_GE_2000MHZ_CYCLES",
+        "Filter": "filter_band1=2000",
+        "MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 3Ghz. Derived from unc_p_freq_band2_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xd",
+        "EventName": "UNC_P_FREQ_GE_3000MHZ_CYCLES",
+        "Filter": "filter_band2=3000",
+        "MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 4Ghz. Derived from unc_p_freq_band3_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xe",
+        "EventName": "UNC_P_FREQ_GE_4000MHZ_CYCLES",
+        "Filter": "filter_band3=4000",
+        "MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to 1.2Ghz. Derived from unc_p_freq_band0_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xb",
+        "EventName": "UNC_P_FREQ_GE_1200MHZ_TRANSITIONS",
+        "Filter": "edge=1,filter_band0=1200",
+        "MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to 2Ghz. Derived from unc_p_freq_band1_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xc",
+        "EventName": "UNC_P_FREQ_GE_2000MHZ_TRANSITIONS",
+        "Filter": "edge=1,filter_band1=2000",
+        "MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles that the uncore transitioned to a frequency greater than or equal to 3Ghz. Derived from unc_p_freq_band2_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xd",
+        "EventName": "UNC_P_FREQ_GE_3000MHZ_TRANSITIONS",
+        "Filter": "edge=1,filter_band2=3000",
+        "MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    },
+    {
+        "BriefDescription": "Counts the number of cycles that the uncore transitioned to a frequency greater than or equal to 4Ghz. Derived from unc_p_freq_band3_cycles",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xe",
+        "EventName": "UNC_P_FREQ_GE_4000MHZ_TRANSITIONS",
+        "Filter": "edge=1,filter_band3=4000",
+        "MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
+        "PerPkg": "1",
+        "Unit": "PCU"
+    }
+]
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/uncore-memory.json b/tools/perf/pmu-events/arch/x86/knightslanding/uncore-memory.json
new file mode 100644 (file)
index 0000000..e3bcd86
--- /dev/null
@@ -0,0 +1,42 @@
+[
+    {
+        "BriefDescription": "ddr bandwidth read (CPU traffic only) (MB/sec). ",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x03",
+        "EventName": "UNC_M_CAS_COUNT.RD",
+        "PerPkg": "1",
+        "ScaleUnit": "6.4e-05MiB",
+        "UMask": "0x01",
+        "Unit": "imc"
+    },
+    {
+        "BriefDescription": "ddr bandwidth write (CPU traffic only) (MB/sec). ",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x03",
+        "EventName": "UNC_M_CAS_COUNT.WR",
+        "PerPkg": "1",
+        "ScaleUnit": "6.4e-05MiB",
+        "UMask": "0x02",
+        "Unit": "imc"
+    },
+    {
+        "BriefDescription": "mcdram bandwidth read (CPU traffic only) (MB/sec). ",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x01",
+        "EventName": "UNC_E_RPQ_INSERTS",
+        "PerPkg": "1",
+        "ScaleUnit": "6.4e-05MiB",
+        "UMask": "0x01",
+        "Unit": "edc_eclk"
+    },
+    {
+        "BriefDescription": "mcdram bandwidth write (CPU traffic only) (MB/sec). ",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x02",
+        "EventName": "UNC_E_WPQ_INSERTS",
+        "PerPkg": "1",
+        "ScaleUnit": "6.4e-05MiB",
+        "UMask": "0x01",
+        "Unit": "edc_eclk"
+    }
+]
index 41611d7f9873c4d4168a33bc878df69b7ca073b9..eed09346a72afab1ede36b7b3b507f3dc5d62a84 100644 (file)
@@ -135,7 +135,6 @@ static struct field {
        const char *field;
        const char *kernel;
 } fields[] = {
-       { "EventCode",  "event=" },
        { "UMask",      "umask=" },
        { "CounterMask", "cmask=" },
        { "Invert",     "inv=" },
@@ -189,6 +188,27 @@ static struct msrmap *lookup_msr(char *map, jsmntok_t *val)
        return NULL;
 }
 
+static struct map {
+       const char *json;
+       const char *perf;
+} unit_to_pmu[] = {
+       { "CBO", "uncore_cbox" },
+       { "QPI LL", "uncore_qpi" },
+       { "SBO", "uncore_sbox" },
+       {}
+};
+
+static const char *field_to_perf(struct map *table, char *map, jsmntok_t *val)
+{
+       int i;
+
+       for (i = 0; table[i].json; i++) {
+               if (json_streq(map, val, table[i].json))
+                       return table[i].perf;
+       }
+       return NULL;
+}
+
 #define EXPECT(e, t, m) do { if (!(e)) {                       \
        jsmntok_t *loc = (t);                                   \
        if (!(t)->start && (t) > tokens)                        \
@@ -270,7 +290,8 @@ static void print_events_table_prefix(FILE *fp, const char *tblname)
 }
 
 static int print_events_table_entry(void *data, char *name, char *event,
-                                   char *desc, char *long_desc)
+                                   char *desc, char *long_desc,
+                                   char *pmu, char *unit, char *perpkg)
 {
        struct perf_entry_data *pd = data;
        FILE *outfp = pd->outfp;
@@ -288,7 +309,12 @@ static int print_events_table_entry(void *data, char *name, char *event,
        fprintf(outfp, "\t.topic = \"%s\",\n", topic);
        if (long_desc && long_desc[0])
                fprintf(outfp, "\t.long_desc = \"%s\",\n", long_desc);
-
+       if (pmu)
+               fprintf(outfp, "\t.pmu = \"%s\",\n", pmu);
+       if (unit)
+               fprintf(outfp, "\t.unit = \"%s\",\n", unit);
+       if (perpkg)
+               fprintf(outfp, "\t.perpkg = \"%s\",\n", perpkg);
        fprintf(outfp, "},\n");
 
        return 0;
@@ -335,7 +361,8 @@ static char *real_event(const char *name, char *event)
 /* Call func with each event in the json file */
 int json_events(const char *fn,
          int (*func)(void *data, char *name, char *event, char *desc,
-                     char *long_desc),
+                     char *long_desc,
+                     char *pmu, char *unit, char *perpkg),
          void *data)
 {
        int err = -EIO;
@@ -343,6 +370,7 @@ int json_events(const char *fn,
        jsmntok_t *tokens, *tok;
        int i, j, len;
        char *map;
+       char buf[128];
 
        if (!fn)
                return -ENOENT;
@@ -356,6 +384,11 @@ int json_events(const char *fn,
                char *event = NULL, *desc = NULL, *name = NULL;
                char *long_desc = NULL;
                char *extra_desc = NULL;
+               char *pmu = NULL;
+               char *filter = NULL;
+               char *perpkg = NULL;
+               char *unit = NULL;
+               unsigned long long eventcode = 0;
                struct msrmap *msr = NULL;
                jsmntok_t *msrval = NULL;
                jsmntok_t *precise = NULL;
@@ -376,6 +409,16 @@ int json_events(const char *fn,
                        nz = !json_streq(map, val, "0");
                        if (match_field(map, field, nz, &event, val)) {
                                /* ok */
+                       } else if (json_streq(map, field, "EventCode")) {
+                               char *code = NULL;
+                               addfield(map, &code, "", "", val);
+                               eventcode |= strtoul(code, NULL, 0);
+                               free(code);
+                       } else if (json_streq(map, field, "ExtSel")) {
+                               char *code = NULL;
+                               addfield(map, &code, "", "", val);
+                               eventcode |= strtoul(code, NULL, 0) << 21;
+                               free(code);
                        } else if (json_streq(map, field, "EventName")) {
                                addfield(map, &name, "", "", val);
                        } else if (json_streq(map, field, "BriefDescription")) {
@@ -399,6 +442,28 @@ int json_events(const char *fn,
                                addfield(map, &extra_desc, ". ",
                                        " Supports address when precise",
                                        NULL);
+                       } else if (json_streq(map, field, "Unit")) {
+                               const char *ppmu;
+                               char *s;
+
+                               ppmu = field_to_perf(unit_to_pmu, map, val);
+                               if (ppmu) {
+                                       pmu = strdup(ppmu);
+                               } else {
+                                       if (!pmu)
+                                               pmu = strdup("uncore_");
+                                       addfield(map, &pmu, "", "", val);
+                                       for (s = pmu; *s; s++)
+                                               *s = tolower(*s);
+                               }
+                               addfield(map, &desc, ". ", "Unit: ", NULL);
+                               addfield(map, &desc, "", pmu, NULL);
+                       } else if (json_streq(map, field, "Filter")) {
+                               addfield(map, &filter, "", "", val);
+                       } else if (json_streq(map, field, "ScaleUnit")) {
+                               addfield(map, &unit, "", "", val);
+                       } else if (json_streq(map, field, "PerPkg")) {
+                               addfield(map, &perpkg, "", "", val);
                        }
                        /* ignore unknown fields */
                }
@@ -410,20 +475,29 @@ int json_events(const char *fn,
                                addfield(map, &extra_desc, " ",
                                                "(Precise event)", NULL);
                }
+               snprintf(buf, sizeof buf, "event=%#llx", eventcode);
+               addfield(map, &event, ",", buf, NULL);
                if (desc && extra_desc)
                        addfield(map, &desc, " ", extra_desc, NULL);
                if (long_desc && extra_desc)
                        addfield(map, &long_desc, " ", extra_desc, NULL);
+               if (filter)
+                       addfield(map, &event, ",", filter, NULL);
                if (msr != NULL)
                        addfield(map, &event, ",", msr->pname, msrval);
                fixname(name);
 
-               err = func(data, name, real_event(name, event), desc, long_desc);
+               err = func(data, name, real_event(name, event), desc, long_desc,
+                               pmu, unit, perpkg);
                free(event);
                free(desc);
                free(name);
                free(long_desc);
                free(extra_desc);
+               free(pmu);
+               free(filter);
+               free(perpkg);
+               free(unit);
                if (err)
                        break;
                tok += j;
index b0eb2744b498876780bd56609ced6fcfd8691675..71e13de31092f2de94827573f4e4dc537df8b726 100644 (file)
@@ -3,7 +3,9 @@
 
 int json_events(const char *fn,
                int (*func)(void *data, char *name, char *event, char *desc,
-                               char *long_desc),
+                               char *long_desc,
+                               char *pmu,
+                               char *unit, char *perpkg),
                void *data);
 char *get_cpu_str(void);
 
index 2eaef595d8a05f25fdade15abdda686353f0d74f..c669a3cdb9f0279c96caa80cf68aab2862e202a9 100644 (file)
@@ -10,6 +10,9 @@ struct pmu_event {
        const char *desc;
        const char *topic;
        const char *long_desc;
+       const char *pmu;
+       const char *unit;
+       const char *perpkg;
 };
 
 /*
index 6676c2dd6dcb946c7322a17630c90f610fce6356..1cb3d9b540e9eda133f4548efc9316ad0d014898 100644 (file)
@@ -44,6 +44,7 @@ perf-y += is_printable_array.o
 perf-y += bitmap.o
 perf-y += perf-hooks.o
 perf-y += clang.o
+perf-y += unit_number__scnprintf.o
 
 $(OUTPUT)tests/llvm-src-base.c: tests/bpf-script-example.c tests/Build
        $(call rule_mkdir)
index 92343f43e44aa660e5ee1e37c2d721aabafc5fcf..1a04fe77487dc54512b3764f8a89961364c9ad45 100644 (file)
@@ -5,11 +5,13 @@
 #include <util/evlist.h>
 #include <linux/bpf.h>
 #include <linux/filter.h>
+#include <api/fs/fs.h>
 #include <bpf/bpf.h>
 #include "tests.h"
 #include "llvm.h"
 #include "debug.h"
 #define NR_ITERS       111
+#define PERF_TEST_BPF_PATH "/sys/fs/bpf/perf_test"
 
 #ifdef HAVE_LIBBPF_SUPPORT
 
@@ -54,6 +56,7 @@ static struct {
        const char *msg_load_fail;
        int (*target_func)(void);
        int expect_result;
+       bool    pin;
 } bpf_testcase_table[] = {
        {
                LLVM_TESTCASE_BASE,
@@ -63,6 +66,17 @@ static struct {
                "load bpf object failed",
                &epoll_wait_loop,
                (NR_ITERS + 1) / 2,
+               false,
+       },
+       {
+               LLVM_TESTCASE_BASE,
+               "BPF pinning",
+               "[bpf_pinning]",
+               "fix kbuild first",
+               "check your vmlinux setting?",
+               &epoll_wait_loop,
+               (NR_ITERS + 1) / 2,
+               true,
        },
 #ifdef HAVE_BPF_PROLOGUE
        {
@@ -73,6 +87,7 @@ static struct {
                "check your vmlinux setting?",
                &llseek_loop,
                (NR_ITERS + 1) / 4,
+               false,
        },
 #endif
        {
@@ -83,6 +98,7 @@ static struct {
                "libbpf error when dealing with relocation",
                NULL,
                0,
+               false,
        },
 };
 
@@ -226,10 +242,34 @@ static int __test__bpf(int idx)
                goto out;
        }
 
-       if (obj)
+       if (obj) {
                ret = do_test(obj,
                              bpf_testcase_table[idx].target_func,
                              bpf_testcase_table[idx].expect_result);
+               if (ret != TEST_OK)
+                       goto out;
+               if (bpf_testcase_table[idx].pin) {
+                       int err;
+
+                       if (!bpf_fs__mount()) {
+                               pr_debug("BPF filesystem not mounted\n");
+                               ret = TEST_FAIL;
+                               goto out;
+                       }
+                       err = mkdir(PERF_TEST_BPF_PATH, 0777);
+                       if (err && errno != EEXIST) {
+                               pr_debug("Failed to make perf_test dir: %s\n",
+                                        strerror(errno));
+                               ret = TEST_FAIL;
+                               goto out;
+                       }
+                       if (bpf_object__pin(obj, PERF_TEST_BPF_PATH))
+                               ret = TEST_FAIL;
+                       if (rm_rf(PERF_TEST_BPF_PATH))
+                               ret = TEST_FAIL;
+               }
+       }
+
 out:
        bpf__clear();
        return ret;
index a77dcc0d24e3f356c9da230cd84a35eddde99852..37e326bfd2dc3a273032eeac68de3c8d4383104f 100644 (file)
@@ -246,6 +246,10 @@ static struct test generic_tests[] = {
                        .get_desc       = test__clang_subtest_get_desc,
                }
        },
+       {
+               .desc = "unit_number__scnprintf",
+               .func = test__unit_number__scnprint,
+       },
        {
                .func = NULL,
        },
index 02a33ebcd992b68a3d1adbf05a2e7bb13298a45f..d357dab72e68862e90d916092755a12f535ec1a4 100644 (file)
@@ -13,7 +13,7 @@ static int test__bpf_parsing(void *obj_buf, size_t obj_buf_sz)
        struct bpf_object *obj;
 
        obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, NULL);
-       if (IS_ERR(obj))
+       if (libbpf_get_error(obj))
                return TEST_FAIL;
        bpf_object__close(obj);
        return TEST_OK;
index 20c2e641c42265b1606d751c64ffec6bdf2e5d3c..aa9276bfe3e9b7b6ed1def3889142c645c1e8e30 100644 (file)
@@ -1779,15 +1779,14 @@ static int test_pmu_events(void)
        }
 
        while (!ret && (ent = readdir(dir))) {
-#define MAX_NAME 100
                struct evlist_test e;
-               char name[MAX_NAME];
+               char name[2 * NAME_MAX + 1 + 12 + 3];
 
                /* Names containing . are special and cannot be used directly */
                if (strchr(ent->d_name, '.'))
                        continue;
 
-               snprintf(name, MAX_NAME, "cpu/event=%s/u", ent->d_name);
+               snprintf(name, sizeof(name), "cpu/event=%s/u", ent->d_name);
 
                e.name  = name;
                e.check = test__checkevent_pmu_events;
@@ -1795,11 +1794,10 @@ static int test_pmu_events(void)
                ret = test_event(&e);
                if (ret)
                        break;
-               snprintf(name, MAX_NAME, "%s:u,cpu/event=%s/u", ent->d_name, ent->d_name);
+               snprintf(name, sizeof(name), "%s:u,cpu/event=%s/u", ent->d_name, ent->d_name);
                e.name  = name;
                e.check = test__checkevent_pmu_events_mix;
                ret = test_event(&e);
-#undef MAX_NAME
        }
 
        closedir(dir);
index 81c6eeaca0f53ec00d9cabca4d13119856b0c719..65dcf48a92fbc2bbc7e89fc8a5243c1bcec3eead 100644 (file)
@@ -50,7 +50,8 @@ static int process_events(union perf_event **events, size_t count)
 }
 
 struct test_attr_event {
-       struct attr_event attr;
+       struct perf_event_header header;
+       struct perf_event_attr   attr;
        u64 id;
 };
 
@@ -71,20 +72,16 @@ int test__parse_no_sample_id_all(int subtest __maybe_unused)
        int err;
 
        struct test_attr_event event1 = {
-               .attr = {
-                       .header = {
-                               .type = PERF_RECORD_HEADER_ATTR,
-                               .size = sizeof(struct test_attr_event),
-                       },
+               .header = {
+                       .type = PERF_RECORD_HEADER_ATTR,
+                       .size = sizeof(struct test_attr_event),
                },
                .id = 1,
        };
        struct test_attr_event event2 = {
-               .attr = {
-                       .header = {
-                               .type = PERF_RECORD_HEADER_ATTR,
-                               .size = sizeof(struct test_attr_event),
-                       },
+               .header = {
+                       .type = PERF_RECORD_HEADER_ATTR,
+                       .size = sizeof(struct test_attr_event),
                },
                .id = 2,
        };
index 8f2e1de6d0eae93d0bb8de2e12602f9898176136..541da7a68f91fc4631c0c20f313a9b58f720771b 100644 (file)
@@ -66,7 +66,7 @@ int test__PERF_RECORD(int subtest __maybe_unused)
        if (evlist == NULL) /* Fallback for kernels lacking PERF_COUNT_SW_DUMMY */
                evlist = perf_evlist__new_default();
 
-       if (evlist == NULL || argv == NULL) {
+       if (evlist == NULL) {
                pr_debug("Not enough memory to create evlist\n");
                goto out;
        }
index a512f0c8ff5b50160b0206c602769df89185c521..1fa9b9d83aa51ba80beb9df29b48ee58be7f4619 100644 (file)
@@ -96,6 +96,7 @@ int test__perf_hooks(int subtest);
 int test__clang(int subtest);
 const char *test__clang_subtest_get_desc(int subtest);
 int test__clang_subtest_get_nr(void);
+int test__unit_number__scnprint(int subtest);
 
 #if defined(__arm__) || defined(__aarch64__)
 #ifdef HAVE_DWARF_UNWIND_SUPPORT
diff --git a/tools/perf/tests/unit_number__scnprintf.c b/tools/perf/tests/unit_number__scnprintf.c
new file mode 100644 (file)
index 0000000..623c2aa
--- /dev/null
@@ -0,0 +1,37 @@
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include "tests.h"
+#include "util.h"
+#include "debug.h"
+
+int test__unit_number__scnprint(int subtest __maybe_unused)
+{
+       struct {
+               u64              n;
+               const char      *str;
+       } test[] = {
+               { 1,                    "1B"    },
+               { 10*1024,              "10K"   },
+               { 20*1024*1024,         "20M"   },
+               { 30*1024*1024*1024ULL, "30G"   },
+               { 0,                    "0B"    },
+               { 0,                    NULL    },
+       };
+       unsigned i = 0;
+
+       while (test[i].str) {
+               char buf[100];
+
+               unit_number__scnprintf(buf, sizeof(buf), test[i].n);
+
+               pr_debug("n %" PRIu64 ", str '%s', buf '%s'\n",
+                        test[i].n, test[i].str, buf);
+
+               if (strcmp(test[i].str, buf))
+                       return TEST_FAIL;
+
+               i++;
+       }
+
+       return TEST_OK;
+}
index 641b40234a9d7c4d420840c9812dc1bbaceba697..fc4fb669ceee37404bdda0e2ba0652f4304c28f8 100644 (file)
@@ -501,8 +501,8 @@ static int hierarchy_set_folding(struct hist_browser *hb, struct hist_entry *he,
        return n;
 }
 
-static void hist_entry__set_folding(struct hist_entry *he,
-                                   struct hist_browser *hb, bool unfold)
+static void __hist_entry__set_folding(struct hist_entry *he,
+                                     struct hist_browser *hb, bool unfold)
 {
        hist_entry__init_have_children(he);
        he->unfolded = unfold ? he->has_children : false;
@@ -520,12 +520,34 @@ static void hist_entry__set_folding(struct hist_entry *he,
                he->nr_rows = 0;
 }
 
+static void hist_entry__set_folding(struct hist_entry *he,
+                                   struct hist_browser *browser, bool unfold)
+{
+       double percent;
+
+       percent = hist_entry__get_percent_limit(he);
+       if (he->filtered || percent < browser->min_pcnt)
+               return;
+
+       __hist_entry__set_folding(he, browser, unfold);
+
+       if (!he->depth || unfold)
+               browser->nr_hierarchy_entries++;
+       if (he->leaf)
+               browser->nr_callchain_rows += he->nr_rows;
+       else if (unfold && !hist_entry__has_hierarchy_children(he, browser->min_pcnt)) {
+               browser->nr_hierarchy_entries++;
+               he->has_no_entry = true;
+               he->nr_rows = 1;
+       } else
+               he->has_no_entry = false;
+}
+
 static void
 __hist_browser__set_folding(struct hist_browser *browser, bool unfold)
 {
        struct rb_node *nd;
        struct hist_entry *he;
-       double percent;
 
        nd = rb_first(&browser->hists->entries);
        while (nd) {
@@ -535,21 +557,6 @@ __hist_browser__set_folding(struct hist_browser *browser, bool unfold)
                nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD);
 
                hist_entry__set_folding(he, browser, unfold);
-
-               percent = hist_entry__get_percent_limit(he);
-               if (he->filtered || percent < browser->min_pcnt)
-                       continue;
-
-               if (!he->depth || unfold)
-                       browser->nr_hierarchy_entries++;
-               if (he->leaf)
-                       browser->nr_callchain_rows += he->nr_rows;
-               else if (unfold && !hist_entry__has_hierarchy_children(he, browser->min_pcnt)) {
-                       browser->nr_hierarchy_entries++;
-                       he->has_no_entry = true;
-                       he->nr_rows = 1;
-               } else
-                       he->has_no_entry = false;
        }
 }
 
@@ -564,6 +571,15 @@ static void hist_browser__set_folding(struct hist_browser *browser, bool unfold)
        ui_browser__reset_index(&browser->b);
 }
 
+static void hist_browser__set_folding_selected(struct hist_browser *browser, bool unfold)
+{
+       if (!browser->he_selection)
+               return;
+
+       hist_entry__set_folding(browser->he_selection, browser, unfold);
+       browser->b.nr_entries = hist_browser__nr_entries(browser);
+}
+
 static void ui_browser__warn_lost_events(struct ui_browser *browser)
 {
        ui_browser__warning(browser, 4,
@@ -637,10 +653,18 @@ int hist_browser__run(struct hist_browser *browser, const char *help)
                        /* Collapse the whole world. */
                        hist_browser__set_folding(browser, false);
                        break;
+               case 'c':
+                       /* Collapse the selected entry. */
+                       hist_browser__set_folding_selected(browser, false);
+                       break;
                case 'E':
                        /* Expand the whole world. */
                        hist_browser__set_folding(browser, true);
                        break;
+               case 'e':
+                       /* Expand the selected entry. */
+                       hist_browser__set_folding_selected(browser, true);
+                       break;
                case 'H':
                        browser->show_headers = !browser->show_headers;
                        hist_browser__update_rows(browser);
index 37388397b5bc051b66dca810c65a4434eea07bb6..18cfcdc90356f89f75b328870cc5fcc621b2005f 100644 (file)
@@ -521,6 +521,12 @@ void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
        list_add_tail(&format->sort_list, &list->sorts);
 }
 
+void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
+                                      struct perf_hpp_fmt *format)
+{
+       list_add(&format->sort_list, &list->sorts);
+}
+
 void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
 {
        list_del(&format->list);
@@ -560,6 +566,10 @@ void perf_hpp__setup_output_field(struct perf_hpp_list *list)
        perf_hpp_list__for_each_sort_list(list, fmt) {
                struct perf_hpp_fmt *pos;
 
+               /* skip sort-only fields ("sort_compute" in perf diff) */
+               if (!fmt->entry && !fmt->color)
+                       continue;
+
                perf_hpp_list__for_each_format(list, pos) {
                        if (fmt_equal(fmt, pos))
                                goto next;
index 1f6b0994f4f4190c8498292771f4ecc3bb09c277..50d13e58210f0d7dc4b59421914317604757ac10 100644 (file)
@@ -7,6 +7,7 @@
 
 pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER;
 void *perf_gtk_handle;
+int use_browser = -1;
 
 #ifdef HAVE_GTK2_SUPPORT
 static int setup_gtk_browser(void)
index 3840e3a870579e7ac8898306b881fb0f40ed00c0..5da376bc1afca6733664f798e0e9d3050dab6e21 100644 (file)
@@ -162,6 +162,7 @@ CFLAGS_rbtree.o        += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ET
 CFLAGS_libstring.o     += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
 CFLAGS_hweight.o       += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
 CFLAGS_parse-events.o  += -Wno-redundant-decls
+CFLAGS_header.o        += -include $(OUTPUT)PERF-VERSION-FILE
 
 $(OUTPUT)util/kallsyms.o: ../lib/symbol/kallsyms.c FORCE
        $(call rule_mkdir)
index 36c861103291982516b7463422544ed279fe18ed..bc6bc7062eb4ffed74457bff52a5ab5c2d885300 100644 (file)
@@ -670,13 +670,13 @@ int bpf__probe(struct bpf_object *obj)
 
                err = convert_perf_probe_events(pev, 1);
                if (err < 0) {
-                       pr_debug("bpf_probe: failed to convert perf probe events");
+                       pr_debug("bpf_probe: failed to convert perf probe events\n");
                        goto out;
                }
 
                err = apply_perf_probe_events(pev, 1);
                if (err < 0) {
-                       pr_debug("bpf_probe: failed to apply perf probe events");
+                       pr_debug("bpf_probe: failed to apply perf probe events\n");
                        goto out;
                }
 
index 42922512c1c62fe09eb12f301c281ceb57c2bf3c..aba953421a0329d0de0f2adc5ce1a4599d0fe320 100644 (file)
@@ -48,6 +48,8 @@ static int parse_callchain_mode(const char *value)
                callchain_param.mode = CHAIN_FOLDED;
                return 0;
        }
+
+       pr_err("Invalid callchain mode: %s\n", value);
        return -1;
 }
 
@@ -63,6 +65,8 @@ static int parse_callchain_order(const char *value)
                callchain_param.order_set = true;
                return 0;
        }
+
+       pr_err("Invalid callchain order: %s\n", value);
        return -1;
 }
 
@@ -80,6 +84,8 @@ static int parse_callchain_sort_key(const char *value)
                callchain_param.branch_callstack = 1;
                return 0;
        }
+
+       pr_err("Invalid callchain sort key: %s\n", value);
        return -1;
 }
 
@@ -97,6 +103,8 @@ static int parse_callchain_value(const char *value)
                callchain_param.value = CCVAL_COUNT;
                return 0;
        }
+
+       pr_err("Invalid callchain config key: %s\n", value);
        return -1;
 }
 
@@ -210,13 +218,17 @@ int perf_callchain_config(const char *var, const char *value)
                return parse_callchain_sort_key(value);
        if (!strcmp(var, "threshold")) {
                callchain_param.min_percent = strtod(value, &endptr);
-               if (value == endptr)
+               if (value == endptr) {
+                       pr_err("Invalid callchain threshold: %s\n", value);
                        return -1;
+               }
        }
        if (!strcmp(var, "print-limit")) {
                callchain_param.print_limit = strtod(value, &endptr);
-               if (value == endptr)
+               if (value == endptr) {
+                       pr_err("Invalid callchain print limit: %s\n", value);
                        return -1;
+               }
        }
 
        return 0;
@@ -437,7 +449,7 @@ fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
                }
                call->ip = cursor_node->ip;
                call->ms.sym = cursor_node->sym;
-               call->ms.map = cursor_node->map;
+               call->ms.map = map__get(cursor_node->map);
 
                if (cursor_node->branch) {
                        call->branch_count = 1;
@@ -477,6 +489,7 @@ add_child(struct callchain_node *parent,
 
                list_for_each_entry_safe(call, tmp, &new->val, list) {
                        list_del(&call->list);
+                       map__zput(call->ms.map);
                        free(call);
                }
                free(new);
@@ -761,6 +774,7 @@ merge_chain_branch(struct callchain_cursor *cursor,
                                        list->ms.map, list->ms.sym,
                                        false, NULL, 0, 0);
                list_del(&list->list);
+               map__zput(list->ms.map);
                free(list);
        }
 
@@ -811,7 +825,8 @@ int callchain_cursor_append(struct callchain_cursor *cursor,
        }
 
        node->ip = ip;
-       node->map = map;
+       map__zput(node->map);
+       node->map = map__get(map);
        node->sym = sym;
        node->branch = branch;
        node->nr_loop_iter = nr_loop_iter;
@@ -1142,11 +1157,13 @@ static void free_callchain_node(struct callchain_node *node)
 
        list_for_each_entry_safe(list, tmp, &node->parent_val, list) {
                list_del(&list->list);
+               map__zput(list->ms.map);
                free(list);
        }
 
        list_for_each_entry_safe(list, tmp, &node->val, list) {
                list_del(&list->list);
+               map__zput(list->ms.map);
                free(list);
        }
 
@@ -1210,6 +1227,7 @@ int callchain_node__make_parent_list(struct callchain_node *node)
                                goto out;
                        *new = *chain;
                        new->has_children = false;
+                       map__get(new->ms.map);
                        list_add_tail(&new->list, &head);
                }
                parent = parent->parent;
@@ -1230,6 +1248,7 @@ int callchain_node__make_parent_list(struct callchain_node *node)
 out:
        list_for_each_entry_safe(chain, new, &head, list) {
                list_del(&chain->list);
+               map__zput(chain->ms.map);
                free(chain);
        }
        return -ENOMEM;
index 35c8e379530f2f1e02d050a25a2ef786ee10a819..4f4b60f1558a827cbe34d9d387e6a91a917b58dd 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/list.h>
 #include <linux/rbtree.h>
 #include "event.h"
+#include "map.h"
 #include "symbol.h"
 
 #define HELP_PAD "\t\t\t\t"
@@ -184,8 +185,13 @@ int callchain_merge(struct callchain_cursor *cursor,
  */
 static inline void callchain_cursor_reset(struct callchain_cursor *cursor)
 {
+       struct callchain_cursor_node *node;
+
        cursor->nr = 0;
        cursor->last = &cursor->first;
+
+       for (node = cursor->first; node != NULL; node = node->next)
+               map__zput(node->map);
 }
 
 int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip,
index 3d906dbbef74b768ed5c1f72f08af0948afc7535..0c7d5a4975cd4a8f798533cbd15660c058652d37 100644 (file)
@@ -386,8 +386,10 @@ static int perf_buildid_config(const char *var, const char *value)
        if (!strcmp(var, "buildid.dir")) {
                const char *dir = perf_config_dirname(var, value);
 
-               if (!dir)
+               if (!dir) {
+                       pr_err("Invalid buildid directory!\n");
                        return -1;
+               }
                strncpy(buildid_dir, dir, MAXPATHLEN-1);
                buildid_dir[MAXPATHLEN-1] = '\0';
        }
@@ -405,10 +407,9 @@ static int perf_default_core_config(const char *var __maybe_unused,
 static int perf_ui_config(const char *var, const char *value)
 {
        /* Add other config variables here. */
-       if (!strcmp(var, "ui.show-headers")) {
+       if (!strcmp(var, "ui.show-headers"))
                symbol_conf.show_hist_headers = perf_config_bool(var, value);
-               return 0;
-       }
+
        return 0;
 }
 
@@ -646,8 +647,13 @@ static int perf_config_set__init(struct perf_config_set *set)
                        goto out;
                }
 
-               if (stat(user_config, &st) < 0)
+               if (stat(user_config, &st) < 0) {
+                       if (errno == ENOENT)
+                               ret = 0;
                        goto out_free;
+               }
+
+               ret = 0;
 
                if (st.st_uid && (st.st_uid != geteuid())) {
                        warning("File %s not owned by current user or root, "
@@ -655,11 +661,8 @@ static int perf_config_set__init(struct perf_config_set *set)
                        goto out_free;
                }
 
-               if (!st.st_size)
-                       goto out_free;
-
-               ret = perf_config_from_file(collect_config, user_config, set);
-
+               if (st.st_size)
+                       ret = perf_config_from_file(collect_config, user_config, set);
 out_free:
                free(user_config);
        }
index 7123f4de32ccbc4beb835260b25a9ca08df789f1..4e6cbc99f08efc608c2d73ffb9054672d25fb7d9 100644 (file)
@@ -1473,7 +1473,7 @@ int bt_convert__perf2ctf(const char *input, const char *path,
                },
        };
        struct ctf_writer *cw = &c.writer;
-       int err = -1;
+       int err;
 
        if (opts->all) {
                c.tool.comm = process_comm_event;
@@ -1481,12 +1481,15 @@ int bt_convert__perf2ctf(const char *input, const char *path,
                c.tool.fork = process_fork_event;
        }
 
-       perf_config(convert__config, &c);
+       err = perf_config(convert__config, &c);
+       if (err)
+               return err;
 
        /* CTF writer */
        if (ctf_writer__init(cw, path))
                return -1;
 
+       err = -1;
        /* perf.data session */
        session = perf_session__new(&file, 0, &c.tool);
        if (!session)
index d2c6cdd9d42b72a194d913bdc1840d5ff72bb281..28d41e709128f51414817ad21a16ea636c007709 100644 (file)
@@ -9,6 +9,13 @@
 #include "debug.h"
 #include "vdso.h"
 
+static const char * const debuglink_paths[] = {
+       "%.0s%s",
+       "%s/%s",
+       "%s/.debug/%s",
+       "/usr/lib/debug%s/%s"
+};
+
 char dso__symtab_origin(const struct dso *dso)
 {
        static const char origin[] = {
@@ -44,24 +51,43 @@ int dso__read_binary_type_filename(const struct dso *dso,
        size_t len;
 
        switch (type) {
-       case DSO_BINARY_TYPE__DEBUGLINK: {
-               char *debuglink;
+       case DSO_BINARY_TYPE__DEBUGLINK:
+       {
+               const char *last_slash;
+               char dso_dir[PATH_MAX];
+               char symfile[PATH_MAX];
+               unsigned int i;
 
                len = __symbol__join_symfs(filename, size, dso->long_name);
-               debuglink = filename + len;
-               while (debuglink != filename && *debuglink != '/')
-                       debuglink--;
-               if (*debuglink == '/')
-                       debuglink++;
+               last_slash = filename + len;
+               while (last_slash != filename && *last_slash != '/')
+                       last_slash--;
 
-               ret = -1;
-               if (!is_regular_file(filename))
+               strncpy(dso_dir, filename, last_slash - filename);
+               dso_dir[last_slash-filename] = '\0';
+
+               if (!is_regular_file(filename)) {
+                       ret = -1;
+                       break;
+               }
+
+               ret = filename__read_debuglink(filename, symfile, PATH_MAX);
+               if (ret)
                        break;
 
-               ret = filename__read_debuglink(filename, debuglink,
-                                              size - (debuglink - filename));
+               /* Check predefined locations where debug file might reside */
+               ret = -1;
+               for (i = 0; i < ARRAY_SIZE(debuglink_paths); i++) {
+                       snprintf(filename, size,
+                                       debuglink_paths[i], dso_dir, symfile);
+                       if (is_regular_file(filename)) {
+                               ret = 0;
+                               break;
+                       }
                }
+
                break;
+       }
        case DSO_BINARY_TYPE__BUILD_ID_CACHE:
                if (dso__build_id_filename(dso, filename, size) == NULL)
                        ret = -1;
index 8ab0d7da956bcb3244d98ee27284034d1fe1b00c..4ea7ce72ed9c8e3fb92ffca44b3f51fb09f6b01f 100644 (file)
@@ -1,5 +1,5 @@
 #include <linux/types.h>
-#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
+#include <linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
 #include <api/fs/fs.h>
 #include "event.h"
 #include "debug.h"
index d92e02006fb853d0b123d6456669d1ac633c8881..b601f2814a30e9d7d2b6f689b5e495659c34e600 100644 (file)
@@ -1184,7 +1184,7 @@ unsigned long perf_event_mlock_kb_in_pages(void)
        return pages;
 }
 
-static size_t perf_evlist__mmap_size(unsigned long pages)
+size_t perf_evlist__mmap_size(unsigned long pages)
 {
        if (pages == UINT_MAX)
                pages = perf_event_mlock_kb_in_pages();
@@ -1224,12 +1224,16 @@ static long parse_pages_arg(const char *str, unsigned long min,
        if (pages == 0 && min == 0) {
                /* leave number of pages at 0 */
        } else if (!is_power_of_2(pages)) {
+               char buf[100];
+
                /* round pages up to next power of 2 */
                pages = roundup_pow_of_two(pages);
                if (!pages)
                        return -EINVAL;
-               pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
-                       pages * page_size, pages);
+
+               unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
+               pr_info("rounding mmap pages size to %s (%lu pages)\n",
+                       buf, pages);
        }
 
        if (pages > max)
@@ -1797,7 +1801,7 @@ int perf_evlist__start_workload(struct perf_evlist *evlist)
                 */
                ret = write(evlist->workload.cork_fd, &bf, 1);
                if (ret < 0)
-                       perror("enable to write to pipe");
+                       perror("unable to write to pipe");
 
                close(evlist->workload.cork_fd);
                return ret;
index 4fd034f22d2fc2c2bd6b1050a42447313b9207cb..389b9ccdf8c75db39d8e2163d6909a4db01e65a8 100644 (file)
@@ -218,6 +218,8 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
                      bool overwrite);
 void perf_evlist__munmap(struct perf_evlist *evlist);
 
+size_t perf_evlist__mmap_size(unsigned long pages);
+
 void perf_evlist__disable(struct perf_evlist *evlist);
 void perf_evlist__enable(struct perf_evlist *evlist);
 void perf_evlist__toggle_enable(struct perf_evlist *evlist);
index 04e536ae4d88423e3c05a98fd1bd97ff64379b14..ac59710b79e0b4cbee35cacc1e40a2102e091d8e 100644 (file)
@@ -1448,8 +1448,8 @@ static bool ignore_missing_thread(struct perf_evsel *evsel,
        return true;
 }
 
-static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
-                             struct thread_map *threads)
+int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
+                    struct thread_map *threads)
 {
        int cpu, thread, nthreads;
        unsigned long flags = PERF_FLAG_FD_CLOEXEC;
@@ -1459,6 +1459,30 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
        if (perf_missing_features.write_backward && evsel->attr.write_backward)
                return -EINVAL;
 
+       if (cpus == NULL) {
+               static struct cpu_map *empty_cpu_map;
+
+               if (empty_cpu_map == NULL) {
+                       empty_cpu_map = cpu_map__dummy_new();
+                       if (empty_cpu_map == NULL)
+                               return -ENOMEM;
+               }
+
+               cpus = empty_cpu_map;
+       }
+
+       if (threads == NULL) {
+               static struct thread_map *empty_thread_map;
+
+               if (empty_thread_map == NULL) {
+                       empty_thread_map = thread_map__new_by_tid(-1);
+                       if (empty_thread_map == NULL)
+                               return -ENOMEM;
+               }
+
+               threads = empty_thread_map;
+       }
+
        if (evsel->system_wide)
                nthreads = 1;
        else
@@ -1655,46 +1679,16 @@ void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
        perf_evsel__free_fd(evsel);
 }
 
-static struct {
-       struct cpu_map map;
-       int cpus[1];
-} empty_cpu_map = {
-       .map.nr = 1,
-       .cpus   = { -1, },
-};
-
-static struct {
-       struct thread_map map;
-       int threads[1];
-} empty_thread_map = {
-       .map.nr  = 1,
-       .threads = { -1, },
-};
-
-int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
-                    struct thread_map *threads)
-{
-       if (cpus == NULL) {
-               /* Work around old compiler warnings about strict aliasing */
-               cpus = &empty_cpu_map.map;
-       }
-
-       if (threads == NULL)
-               threads = &empty_thread_map.map;
-
-       return __perf_evsel__open(evsel, cpus, threads);
-}
-
 int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
                             struct cpu_map *cpus)
 {
-       return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
+       return perf_evsel__open(evsel, cpus, NULL);
 }
 
 int perf_evsel__open_per_thread(struct perf_evsel *evsel,
                                struct thread_map *threads)
 {
-       return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
+       return perf_evsel__open(evsel, NULL, threads);
 }
 
 static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
@@ -2469,7 +2463,9 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
                 "  -1: Allow use of (almost) all events by all users\n"
                 ">= 0: Disallow raw tracepoint access by users without CAP_IOC_LOCK\n"
                 ">= 1: Disallow CPU event access by users without CAP_SYS_ADMIN\n"
-                ">= 2: Disallow kernel profiling by users without CAP_SYS_ADMIN",
+                ">= 2: Disallow kernel profiling by users without CAP_SYS_ADMIN\n\n"
+                "To make this setting permanent, edit /etc/sysctl.conf too, e.g.:\n\n"
+                "      kernel.perf_event_paranoid = -1\n" ,
                                 target->system_wide ? "system-wide " : "",
                                 perf_event_paranoid());
        case ENOENT:
index 6b2925542c0a641d92ca15db702a43fbe15946c4..4ef5184819a0975d592aecff5204d9d273494b4f 100644 (file)
@@ -168,7 +168,6 @@ int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
 
                        if (symbol_conf.bt_stop_list &&
                            node->sym &&
-                           node->sym->name &&
                            strlist__has_entry(symbol_conf.bt_stop_list,
                                               node->sym->name)) {
                                break;
index d89c9c7ef4e54ba6381c7b63d8db29c87328b3ea..3d12c16e51034a8591eba97221d20b37603dd40a 100644 (file)
@@ -41,6 +41,8 @@ static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
 
 #define PERF_MAGIC     __perf_magic2
 
+const char perf_version_string[] = PERF_VERSION;
+
 struct perf_file_attr {
        struct perf_event_attr  attr;
        struct perf_file_section        ids;
@@ -2801,8 +2803,10 @@ static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
        }
 
        event = pevent_find_event(pevent, evsel->attr.config);
-       if (event == NULL)
+       if (event == NULL) {
+               pr_debug("cannot find event format for %d\n", (int)evsel->attr.config);
                return -1;
+       }
 
        if (!evsel->name) {
                snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
@@ -3201,6 +3205,7 @@ int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
        case PERF_EVENT_UPDATE__SCALE:
                ev_scale = (struct event_update_event_scale *) ev->data;
                evsel->scale = ev_scale->scale;
+               break;
        case PERF_EVENT_UPDATE__CPUS:
                ev_cpus = (struct event_update_event_cpus *) ev->data;
 
index 6770a964560954e0b7d028698a50b8bcab0cf891..32c6a939e4cc6879d872574e27b7dab28970cb2c 100644 (file)
@@ -1,6 +1,7 @@
 #include "util.h"
 #include "build-id.h"
 #include "hist.h"
+#include "map.h"
 #include "session.h"
 #include "sort.h"
 #include "evlist.h"
@@ -1019,6 +1020,10 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
                         int max_stack_depth, void *arg)
 {
        int err, err2;
+       struct map *alm = NULL;
+
+       if (al && al->map)
+               alm = map__get(al->map);
 
        err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
                                        iter->evsel, al, max_stack_depth);
@@ -1058,6 +1063,8 @@ out:
        if (!err)
                err = err2;
 
+       map__put(alm);
+
        return err;
 }
 
@@ -2439,8 +2446,10 @@ int parse_filter_percentage(const struct option *opt __maybe_unused,
                symbol_conf.filter_relative = true;
        else if (!strcmp(arg, "absolute"))
                symbol_conf.filter_relative = false;
-       else
+       else {
+               pr_debug("Invalud percentage: %s\n", arg);
                return -1;
+       }
 
        return 0;
 }
index d4b6514eeef5dae417cfc76f1fc2de9cc869575f..28c216e3d5b72557825a8918ab2aa6d6369c40f8 100644 (file)
@@ -283,6 +283,8 @@ void perf_hpp_list__column_register(struct perf_hpp_list *list,
                                    struct perf_hpp_fmt *format);
 void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
                                        struct perf_hpp_fmt *format);
+void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
+                                      struct perf_hpp_fmt *format);
 
 static inline void perf_hpp__column_register(struct perf_hpp_fmt *format)
 {
@@ -294,6 +296,11 @@ static inline void perf_hpp__register_sort_field(struct perf_hpp_fmt *format)
        perf_hpp_list__register_sort_field(&perf_hpp_list, format);
 }
 
+static inline void perf_hpp__prepend_sort_field(struct perf_hpp_fmt *format)
+{
+       perf_hpp_list__prepend_sort_field(&perf_hpp_list, format);
+}
+
 #define perf_hpp_list__for_each_format(_list, format) \
        list_for_each_entry(format, &(_list)->fields, list)
 
index 9b742ea8bfe8493b8220c1e34ca91ac784fd73c3..7aca5d6d7e1f05ded3ee5353358d9b4f7a71ee3e 100644 (file)
@@ -23,4 +23,8 @@ $(OUTPUT)util/intel-pt-decoder/intel-pt-insn-decoder.o: util/intel-pt-decoder/in
        $(call rule_mkdir)
        $(call if_changed_dep,cc_o_c)
 
-CFLAGS_intel-pt-insn-decoder.o += -I$(OUTPUT)util/intel-pt-decoder -Wno-override-init
+CFLAGS_intel-pt-insn-decoder.o += -I$(OUTPUT)util/intel-pt-decoder
+
+ifneq ($(CC), clang)
+  CFLAGS_intel-pt-insn-decoder.o += -Wno-override-init
+endif
index e4e7dc781d21d17b309513c54ff7c0cf0ab9d383..7cf7f7aca4d2e80cd3f39cf83bc515b26daaf497 100644 (file)
@@ -22,6 +22,7 @@
 #include <errno.h>
 #include <stdint.h>
 #include <inttypes.h>
+#include <linux/compiler.h>
 
 #include "../cache.h"
 #include "../util.h"
@@ -1746,6 +1747,7 @@ static int intel_pt_walk_psb(struct intel_pt_decoder *decoder)
                switch (decoder->packet.type) {
                case INTEL_PT_TIP_PGD:
                        decoder->continuous_period = false;
+                       __fallthrough;
                case INTEL_PT_TIP_PGE:
                case INTEL_PT_TIP:
                        intel_pt_log("ERROR: Unexpected packet\n");
@@ -1799,6 +1801,8 @@ static int intel_pt_walk_psb(struct intel_pt_decoder *decoder)
                        decoder->pge = false;
                        decoder->continuous_period = false;
                        intel_pt_clear_tx_flags(decoder);
+                       __fallthrough;
+
                case INTEL_PT_TNT:
                        decoder->have_tma = false;
                        intel_pt_log("ERROR: Unexpected packet\n");
@@ -1839,6 +1843,7 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder)
                switch (decoder->packet.type) {
                case INTEL_PT_TIP_PGD:
                        decoder->continuous_period = false;
+                       __fallthrough;
                case INTEL_PT_TIP_PGE:
                case INTEL_PT_TIP:
                        decoder->pge = decoder->packet.type != INTEL_PT_TIP_PGD;
index 4f7b32020487011a6bb04ec8d339e7d7c4dfe7cc..7528ae4f7e28e1d419c699759125c979e8dc1397 100644 (file)
@@ -17,6 +17,7 @@
 #include <string.h>
 #include <endian.h>
 #include <byteswap.h>
+#include <linux/compiler.h>
 
 #include "intel-pt-pkt-decoder.h"
 
@@ -498,6 +499,7 @@ int intel_pt_pkt_desc(const struct intel_pt_pkt *packet, char *buf,
        case INTEL_PT_FUP:
                if (!(packet->count))
                        return snprintf(buf, buf_len, "%s no ip", name);
+               __fallthrough;
        case INTEL_PT_CYC:
        case INTEL_PT_VMCS:
        case INTEL_PT_MTC:
index 85d5eeb66c75339fc7c230bfcd7bcabd975ee402..da20cd5612e97f53853a3fdfb79502cdfa5d3a6b 100644 (file)
@@ -2159,7 +2159,9 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
 
        addr_filters__init(&pt->filts);
 
-       perf_config(intel_pt_perf_config, pt);
+       err = perf_config(intel_pt_perf_config, pt);
+       if (err)
+               goto err_free;
 
        err = auxtrace_queues__init(&pt->queues);
        if (err)
index b23ff44cf214fe7f786230a72a8466ffe7e91dc7..824356488ce6a8c7d9ecda1748456b262f0a7114 100644 (file)
@@ -48,8 +48,10 @@ int perf_llvm_config(const char *var, const char *value)
                llvm_param.kbuild_opts = strdup(value);
        else if (!strcmp(var, "dump-obj"))
                llvm_param.dump_obj = !!perf_config_bool(var, value);
-       else
+       else {
+               pr_debug("Invalid LLVM config option: %s\n", value);
                return -1;
+       }
        llvm_param.user_set_param = true;
        return 0;
 }
index 9b33bef545818cd530eea80b8790ac0a6fcc026e..71c9720d49730066dc1100defee124a4d3c198b9 100644 (file)
@@ -87,6 +87,25 @@ out_delete:
        return NULL;
 }
 
+struct machine *machine__new_kallsyms(void)
+{
+       struct machine *machine = machine__new_host();
+       /*
+        * FIXME:
+        * 1) MAP__FUNCTION will go away when we stop loading separate maps for
+        *    functions and data objects.
+        * 2) We should switch to machine__load_kallsyms(), i.e. not explicitely
+        *    ask for not using the kcore parsing code, once this one is fixed
+        *    to create a map per module.
+        */
+       if (machine && __machine__load_kallsyms(machine, "/proc/kallsyms", MAP__FUNCTION, true) <= 0) {
+               machine__delete(machine);
+               machine = NULL;
+       }
+
+       return machine;
+}
+
 static void dsos__purge(struct dsos *dsos)
 {
        struct dso *pos, *n;
@@ -763,7 +782,7 @@ static u64 machine__get_running_kernel_start(struct machine *machine,
 
 int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
 {
-       enum map_type type;
+       int type;
        u64 start = machine__get_running_kernel_start(machine, NULL);
 
        /* In case of renewal the kernel map, destroy previous one */
@@ -794,7 +813,7 @@ int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
 
 void machine__destroy_kernel_maps(struct machine *machine)
 {
-       enum map_type type;
+       int type;
 
        for (type = 0; type < MAP__NR_TYPES; ++type) {
                struct kmap *kmap;
@@ -1546,7 +1565,7 @@ int machine__process_event(struct machine *machine, union perf_event *event,
 
 static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
 {
-       if (sym->name && !regexec(regex, sym->name, 0, NULL, 0))
+       if (!regexec(regex, sym->name, 0, NULL, 0))
                return 1;
        return 0;
 }
index 354de6e56109aa7f5ecc770ad186332d818d42c6..a28305029711c08f56c89385c85fa49611e55aa3 100644 (file)
@@ -129,6 +129,7 @@ char *machine__mmap_name(struct machine *machine, char *bf, size_t size);
 void machines__set_comm_exec(struct machines *machines, bool comm_exec);
 
 struct machine *machine__new_host(void);
+struct machine *machine__new_kallsyms(void);
 int machine__init(struct machine *machine, const char *root_dir, pid_t pid);
 void machine__exit(struct machine *machine);
 void machine__delete_threads(struct machine *machine);
index 4f9a71c63026d185e48807895f0bee39570e8598..0a943e7b1ea7fc745485f016875395e91feca8c8 100644 (file)
@@ -387,10 +387,10 @@ size_t map__fprintf_dsoname(struct map *map, FILE *fp)
 {
        const char *dsoname = "[unknown]";
 
-       if (map && map->dso && (map->dso->name || map->dso->long_name)) {
+       if (map && map->dso) {
                if (symbol_conf.show_kernel_path && map->dso->long_name)
                        dsoname = map->dso->long_name;
-               else if (map->dso->name)
+               else
                        dsoname = map->dso->name;
        }
 
index 3c876b8ba4de68afaf312e4bc6df07a4b58f0211..281e44af31e2fb0b2e32928290f3c4cdfd03cccf 100644 (file)
@@ -211,6 +211,8 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
                                closedir(evt_dir);
                                closedir(sys_dir);
                                path = zalloc(sizeof(*path));
+                               if (!path)
+                                       return NULL;
                                path->system = malloc(MAX_EVENT_LENGTH);
                                if (!path->system) {
                                        free(path);
@@ -252,8 +254,7 @@ struct tracepoint_path *tracepoint_name_to_path(const char *name)
        if (path->system == NULL || path->name == NULL) {
                zfree(&path->system);
                zfree(&path->name);
-               free(path);
-               path = NULL;
+               zfree(&path);
        }
 
        return path;
@@ -310,10 +311,11 @@ __add_event(struct list_head *list, int *idx,
 
        event_attr_init(attr);
 
-       evsel = perf_evsel__new_idx(attr, (*idx)++);
+       evsel = perf_evsel__new_idx(attr, *idx);
        if (!evsel)
                return NULL;
 
+       (*idx)++;
        evsel->cpus     = cpu_map__get(cpus);
        evsel->own_cpus = cpu_map__get(cpus);
 
@@ -1477,10 +1479,9 @@ static void perf_pmu__parse_cleanup(void)
 
                for (i = 0; i < perf_pmu_events_list_num; i++) {
                        p = perf_pmu_events_list + i;
-                       free(p->symbol);
+                       zfree(&p->symbol);
                }
-               free(perf_pmu_events_list);
-               perf_pmu_events_list = NULL;
+               zfree(&perf_pmu_events_list);
                perf_pmu_events_list_num = 0;
        }
 }
@@ -1504,35 +1505,41 @@ static void perf_pmu__parse_init(void)
        struct perf_pmu_alias *alias;
        int len = 0;
 
-       pmu = perf_pmu__find("cpu");
-       if ((pmu == NULL) || list_empty(&pmu->aliases)) {
+       pmu = NULL;
+       while ((pmu = perf_pmu__scan(pmu)) != NULL) {
+               list_for_each_entry(alias, &pmu->aliases, list) {
+                       if (strchr(alias->name, '-'))
+                               len++;
+                       len++;
+               }
+       }
+
+       if (len == 0) {
                perf_pmu_events_list_num = -1;
                return;
        }
-       list_for_each_entry(alias, &pmu->aliases, list) {
-               if (strchr(alias->name, '-'))
-                       len++;
-               len++;
-       }
        perf_pmu_events_list = malloc(sizeof(struct perf_pmu_event_symbol) * len);
        if (!perf_pmu_events_list)
                return;
        perf_pmu_events_list_num = len;
 
        len = 0;
-       list_for_each_entry(alias, &pmu->aliases, list) {
-               struct perf_pmu_event_symbol *p = perf_pmu_events_list + len;
-               char *tmp = strchr(alias->name, '-');
-
-               if (tmp != NULL) {
-                       SET_SYMBOL(strndup(alias->name, tmp - alias->name),
-                                       PMU_EVENT_SYMBOL_PREFIX);
-                       p++;
-                       SET_SYMBOL(strdup(++tmp), PMU_EVENT_SYMBOL_SUFFIX);
-                       len += 2;
-               } else {
-                       SET_SYMBOL(strdup(alias->name), PMU_EVENT_SYMBOL);
-                       len++;
+       pmu = NULL;
+       while ((pmu = perf_pmu__scan(pmu)) != NULL) {
+               list_for_each_entry(alias, &pmu->aliases, list) {
+                       struct perf_pmu_event_symbol *p = perf_pmu_events_list + len;
+                       char *tmp = strchr(alias->name, '-');
+
+                       if (tmp != NULL) {
+                               SET_SYMBOL(strndup(alias->name, tmp - alias->name),
+                                               PMU_EVENT_SYMBOL_PREFIX);
+                               p++;
+                               SET_SYMBOL(strdup(++tmp), PMU_EVENT_SYMBOL_SUFFIX);
+                               len += 2;
+                       } else {
+                               SET_SYMBOL(strdup(alias->name), PMU_EVENT_SYMBOL);
+                               len++;
+                       }
                }
        }
        qsort(perf_pmu_events_list, len,
@@ -1563,7 +1570,7 @@ perf_pmu__parse_check(const char *name)
        r = bsearch(&p, perf_pmu_events_list,
                        (size_t) perf_pmu_events_list_num,
                        sizeof(struct perf_pmu_event_symbol), comp_pmu);
-       free(p.symbol);
+       zfree(&p.symbol);
        return r ? r->type : PMU_EVENT_SYMBOL_ERR;
 }
 
@@ -1710,8 +1717,8 @@ static void parse_events_print_error(struct parse_events_error *err,
                fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err->str);
                if (err->help)
                        fprintf(stderr, "\n%s\n", err->help);
-               free(err->str);
-               free(err->help);
+               zfree(&err->str);
+               zfree(&err->help);
        }
 
        fprintf(stderr, "Run 'perf list' for a list of valid events\n");
@@ -2013,17 +2020,14 @@ static bool is_event_supported(u8 type, unsigned config)
                .config = config,
                .disabled = 1,
        };
-       struct {
-               struct thread_map map;
-               int threads[1];
-       } tmap = {
-               .map.nr  = 1,
-               .threads = { 0 },
-       };
+       struct thread_map *tmap = thread_map__new_by_tid(0);
+
+       if (tmap == NULL)
+               return false;
 
        evsel = perf_evsel__new(&attr);
        if (evsel) {
-               open_return = perf_evsel__open(evsel, NULL, &tmap.map);
+               open_return = perf_evsel__open(evsel, NULL, tmap);
                ret = open_return >= 0;
 
                if (open_return == -EACCES) {
@@ -2035,7 +2039,7 @@ static bool is_event_supported(u8 type, unsigned config)
                         *
                         */
                        evsel->attr.exclude_kernel = 1;
-                       ret = perf_evsel__open(evsel, NULL, &tmap.map) >= 0;
+                       ret = perf_evsel__open(evsel, NULL, tmap) >= 0;
                }
                perf_evsel__delete(evsel);
        }
@@ -2406,7 +2410,7 @@ void parse_events_terms__purge(struct list_head *terms)
 
        list_for_each_entry_safe(term, h, terms, list) {
                if (term->array.nr_ranges)
-                       free(term->array.ranges);
+                       zfree(&term->array.ranges);
                list_del_init(&term->list);
                free(term);
        }
@@ -2422,7 +2426,7 @@ void parse_events_terms__delete(struct list_head *terms)
 
 void parse_events__clear_array(struct parse_events_array *a)
 {
-       free(a->ranges);
+       zfree(&a->ranges);
 }
 
 void parse_events_evlist_error(struct parse_events_evlist *data,
index 879115f93edcdc52171b42fc6e544662b579da1f..a14b47ab3879bd67db8854d495ba2e195c4f9068 100644 (file)
 #include <linux/list.h>
 #include <linux/types.h>
 #include "util.h"
+#include "pmu.h"
+#include "debug.h"
 #include "parse-events.h"
 #include "parse-events-bison.h"
 
+void parse_events_error(YYLTYPE *loc, void *data, void *scanner, char const *msg);
+
 #define ABORT_ON(val) \
 do { \
        if (val) \
@@ -236,15 +240,34 @@ PE_KERNEL_PMU_EVENT sep_dc
        struct list_head *head;
        struct parse_events_term *term;
        struct list_head *list;
+       struct perf_pmu *pmu = NULL;
+       int ok = 0;
 
-       ALLOC_LIST(head);
-       ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
-                                       $1, 1, &@1, NULL));
-       list_add_tail(&term->list, head);
-
+       /* Add it for all PMUs that support the alias */
        ALLOC_LIST(list);
-       ABORT_ON(parse_events_add_pmu(data, list, "cpu", head));
-       parse_events_terms__delete(head);
+       while ((pmu = perf_pmu__scan(pmu)) != NULL) {
+               struct perf_pmu_alias *alias;
+
+               list_for_each_entry(alias, &pmu->aliases, list) {
+                       if (!strcasecmp(alias->name, $1)) {
+                               ALLOC_LIST(head);
+                               ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
+                                       $1, 1, &@1, NULL));
+                               list_add_tail(&term->list, head);
+
+                               if (!parse_events_add_pmu(data, list,
+                                                 pmu->name, head)) {
+                                       pr_debug("%s -> %s/%s/\n", $1,
+                                                pmu->name, alias->str);
+                                       ok++;
+                               }
+
+                               parse_events_terms__delete(head);
+                       }
+               }
+       }
+       if (!ok)
+               YYABORT;
        $$ = list;
 }
 |
index dc6ccaa4e927485ca803f537d7b398ef016b16b0..49bfee0e3d9ed0b483180a74b7bfd96954f4968b 100644 (file)
@@ -94,32 +94,10 @@ static int pmu_format(const char *name, struct list_head *format)
        return 0;
 }
 
-static int perf_pmu__parse_scale(struct perf_pmu_alias *alias, char *dir, char *name)
+static int convert_scale(const char *scale, char **end, double *sval)
 {
-       struct stat st;
-       ssize_t sret;
-       char scale[128];
-       int fd, ret = -1;
-       char path[PATH_MAX];
        char *lc;
-
-       snprintf(path, PATH_MAX, "%s/%s.scale", dir, name);
-
-       fd = open(path, O_RDONLY);
-       if (fd == -1)
-               return -1;
-
-       if (fstat(fd, &st) < 0)
-               goto error;
-
-       sret = read(fd, scale, sizeof(scale)-1);
-       if (sret < 0)
-               goto error;
-
-       if (scale[sret - 1] == '\n')
-               scale[sret - 1] = '\0';
-       else
-               scale[sret] = '\0';
+       int ret = 0;
 
        /*
         * save current locale
@@ -134,7 +112,7 @@ static int perf_pmu__parse_scale(struct perf_pmu_alias *alias, char *dir, char *
        lc = strdup(lc);
        if (!lc) {
                ret = -ENOMEM;
-               goto error;
+               goto out;
        }
 
        /*
@@ -144,14 +122,42 @@ static int perf_pmu__parse_scale(struct perf_pmu_alias *alias, char *dir, char *
         */
        setlocale(LC_NUMERIC, "C");
 
-       alias->scale = strtod(scale, NULL);
+       *sval = strtod(scale, end);
 
+out:
        /* restore locale */
        setlocale(LC_NUMERIC, lc);
-
        free(lc);
+       return ret;
+}
+
+static int perf_pmu__parse_scale(struct perf_pmu_alias *alias, char *dir, char *name)
+{
+       struct stat st;
+       ssize_t sret;
+       char scale[128];
+       int fd, ret = -1;
+       char path[PATH_MAX];
+
+       snprintf(path, PATH_MAX, "%s/%s.scale", dir, name);
+
+       fd = open(path, O_RDONLY);
+       if (fd == -1)
+               return -1;
+
+       if (fstat(fd, &st) < 0)
+               goto error;
+
+       sret = read(fd, scale, sizeof(scale)-1);
+       if (sret < 0)
+               goto error;
 
-       ret = 0;
+       if (scale[sret - 1] == '\n')
+               scale[sret - 1] = '\0';
+       else
+               scale[sret] = '\0';
+
+       ret = convert_scale(scale, NULL, &alias->scale);
 error:
        close(fd);
        return ret;
@@ -223,11 +229,13 @@ static int perf_pmu__parse_snapshot(struct perf_pmu_alias *alias,
 }
 
 static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
-                                char *desc, char *val, char *long_desc,
-                                char *topic)
+                                char *desc, char *val,
+                                char *long_desc, char *topic,
+                                char *unit, char *perpkg)
 {
        struct perf_pmu_alias *alias;
        int ret;
+       int num;
 
        alias = malloc(sizeof(*alias));
        if (!alias)
@@ -261,6 +269,13 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
        alias->long_desc = long_desc ? strdup(long_desc) :
                                desc ? strdup(desc) : NULL;
        alias->topic = topic ? strdup(topic) : NULL;
+       if (unit) {
+               if (convert_scale(unit, &unit, &alias->scale) < 0)
+                       return -1;
+               snprintf(alias->unit, sizeof(alias->unit), "%s", unit);
+       }
+       alias->per_pkg = perpkg && sscanf(perpkg, "%d", &num) == 1 && num == 1;
+       alias->str = strdup(val);
 
        list_add_tail(&alias->list, list);
 
@@ -278,7 +293,8 @@ static int perf_pmu__new_alias(struct list_head *list, char *dir, char *name, FI
 
        buf[ret] = 0;
 
-       return __perf_pmu__new_alias(list, dir, name, NULL, buf, NULL, NULL);
+       return __perf_pmu__new_alias(list, dir, name, NULL, buf, NULL, NULL, NULL,
+                                    NULL);
 }
 
 static inline bool pmu_alias_info_file(char *name)
@@ -498,7 +514,7 @@ char * __weak get_cpuid_str(void)
  * to the current running CPU. Then, add all PMU events from that table
  * as aliases.
  */
-static void pmu_add_cpu_aliases(struct list_head *head)
+static void pmu_add_cpu_aliases(struct list_head *head, const char *name)
 {
        int i;
        struct pmu_events_map *map;
@@ -534,14 +550,21 @@ static void pmu_add_cpu_aliases(struct list_head *head)
         */
        i = 0;
        while (1) {
+               const char *pname;
+
                pe = &map->table[i++];
                if (!pe->name)
                        break;
 
+               pname = pe->pmu ? pe->pmu : "cpu";
+               if (strncmp(pname, name, strlen(pname)))
+                       continue;
+
                /* need type casts to override 'const' */
                __perf_pmu__new_alias(head, NULL, (char *)pe->name,
                                (char *)pe->desc, (char *)pe->event,
-                               (char *)pe->long_desc, (char *)pe->topic);
+                               (char *)pe->long_desc, (char *)pe->topic,
+                               (char *)pe->unit, (char *)pe->perpkg);
        }
 
 out:
@@ -569,15 +592,16 @@ static struct perf_pmu *pmu_lookup(const char *name)
        if (pmu_format(name, &format))
                return NULL;
 
-       if (pmu_aliases(name, &aliases))
+       /*
+        * Check the type first to avoid unnecessary work.
+        */
+       if (pmu_type(name, &type))
                return NULL;
 
-       if (!strcmp(name, "cpu"))
-               pmu_add_cpu_aliases(&aliases);
-
-       if (pmu_type(name, &type))
+       if (pmu_aliases(name, &aliases))
                return NULL;
 
+       pmu_add_cpu_aliases(&aliases, name);
        pmu = zalloc(sizeof(*pmu));
        if (!pmu)
                return NULL;
@@ -921,12 +945,12 @@ static int check_info_data(struct perf_pmu_alias *alias,
         * define unit, scale and snapshot, fail
         * if there's more than one.
         */
-       if ((info->unit && alias->unit) ||
+       if ((info->unit && alias->unit[0]) ||
            (info->scale && alias->scale) ||
            (info->snapshot && alias->snapshot))
                return -EINVAL;
 
-       if (alias->unit)
+       if (alias->unit[0])
                info->unit = alias->unit;
 
        if (alias->scale)
@@ -1065,6 +1089,8 @@ struct sevent {
        char *name;
        char *desc;
        char *topic;
+       char *str;
+       char *pmu;
 };
 
 static int cmp_sevent(const void *a, const void *b)
@@ -1161,6 +1187,8 @@ void print_pmu_events(const char *event_glob, bool name_only, bool quiet_flag,
                        aliases[j].desc = long_desc ? alias->long_desc :
                                                alias->desc;
                        aliases[j].topic = alias->topic;
+                       aliases[j].str = alias->str;
+                       aliases[j].pmu = pmu->name;
                        j++;
                }
                if (pmu->selectable &&
@@ -1175,6 +1203,9 @@ void print_pmu_events(const char *event_glob, bool name_only, bool quiet_flag,
        len = j;
        qsort(aliases, len, sizeof(struct sevent), cmp_sevent);
        for (j = 0; j < len; j++) {
+               /* Skip duplicates */
+               if (j > 0 && !strcmp(aliases[j].name, aliases[j - 1].name))
+                       continue;
                if (name_only) {
                        printf("%s ", aliases[j].name);
                        continue;
@@ -1192,6 +1223,8 @@ void print_pmu_events(const char *event_glob, bool name_only, bool quiet_flag,
                        printf("%*s", 8, "[");
                        wordwrap(aliases[j].desc, 8, columns, 0);
                        printf("]\n");
+                       if (verbose)
+                               printf("%*s%s/%s/\n", 8, "", aliases[j].pmu, aliases[j].str);
                } else
                        printf("  %-50s [Kernel PMU event]\n", aliases[j].name);
                printed++;
index 25712034c815ba92856a03b5d6164ff158a9ad5c..00852ddc7741b068e212adf2eb3e5e2b0c56c872 100644 (file)
@@ -43,6 +43,7 @@ struct perf_pmu_alias {
        char *desc;
        char *long_desc;
        char *topic;
+       char *str;
        struct list_head terms; /* HEAD struct parse_events_term -> list */
        struct list_head list;  /* ELEM */
        char unit[UNIT_MAX_LEN+1];
index 6a6f44dd594bc4c6275694335ebaa02b22118982..35f5b7b7715c39e054591644d88b369bbf35de5e 100644 (file)
@@ -2061,7 +2061,7 @@ static int find_perf_probe_point_from_map(struct probe_trace_point *tp,
                                          bool is_kprobe)
 {
        struct symbol *sym = NULL;
-       struct map *map;
+       struct map *map = NULL;
        u64 addr = tp->address;
        int ret = -ENOENT;
 
@@ -3023,20 +3023,17 @@ static int try_to_find_absolute_address(struct perf_probe_event *pev,
 
        tev->nargs = pev->nargs;
        tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs);
-       if (!tev->args) {
-               err = -ENOMEM;
+       if (!tev->args)
                goto errout;
-       }
+
        for (i = 0; i < tev->nargs; i++)
                copy_to_probe_trace_arg(&tev->args[i], &pev->args[i]);
 
        return 1;
 
 errout:
-       if (*tevs) {
-               clear_probe_trace_events(*tevs, 1);
-               *tevs = NULL;
-       }
+       clear_probe_trace_events(*tevs, 1);
+       *tevs = NULL;
        return err;
 }
 
index 6516e220c24752fe729b17ddaf94115d6795f90a..82d28c67e0f3b7630d939e0a71fdad80a1be6dbd 100644 (file)
@@ -1,6 +1,6 @@
 libperf-$(CONFIG_LIBPERL)   += trace-event-perl.o
 libperf-$(CONFIG_LIBPYTHON) += trace-event-python.o
 
-CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-undef -Wno-switch-default
+CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-nested-externs -Wno-undef -Wno-switch-default
 
 CFLAGS_trace-event-python.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow
index e55a132f69b73e23661bf1a6e21aaac36cbb85ad..c1555fd0035a0f01632312be2488710afe6b7556 100644 (file)
@@ -309,10 +309,10 @@ static SV *perl_process_callchain(struct perf_sample *sample,
                if (node->map) {
                        struct map *map = node->map;
                        const char *dsoname = "[unknown]";
-                       if (map && map->dso && (map->dso->name || map->dso->long_name)) {
+                       if (map && map->dso) {
                                if (symbol_conf.show_kernel_path && map->dso->long_name)
                                        dsoname = map->dso->long_name;
-                               else if (map->dso->name)
+                               else
                                        dsoname = map->dso->name;
                        }
                        if (!hv_stores(elem, "dso", newSVpv(dsoname,0))) {
@@ -350,8 +350,10 @@ static void perl_process_tracepoint(struct perf_sample *sample,
        if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
                return;
 
-       if (!event)
-               die("ug! no event found for type %" PRIu64, (u64)evsel->attr.config);
+       if (!event) {
+               pr_debug("ug! no event found for type %" PRIu64, (u64)evsel->attr.config);
+               return;
+       }
 
        pid = raw_field_value(event, "common_pid", data);
 
index f268201048a0b69774cef2e6a8f4cb4a1634de01..4cdbc8f5f14dbf0dff51304ffb59812ce7273ea6 100644 (file)
@@ -1191,7 +1191,7 @@ static int
        u64 sample_type = evsel->attr.sample_type;
        u64 read_format = evsel->attr.read_format;
 
-       /* Standard sample delievery. */
+       /* Standard sample delivery. */
        if (!(sample_type & PERF_SAMPLE_READ))
                return tool->sample(tool, event, sample, evsel, machine);
 
@@ -1901,7 +1901,7 @@ int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
                                     const char *symbol_name, u64 addr)
 {
        char *bracket;
-       enum map_type i;
+       int i;
        struct ref_reloc_sym *ref;
 
        ref = zalloc(sizeof(struct ref_reloc_sym));
index bcae659b65462cddff5c03c2c41a8fc675ad05bc..efb53772e0ecc8e6a9c9ea4493d37c6dc55d38bf 100644 (file)
@@ -269,6 +269,7 @@ static int strfilter_node__sprint(struct strfilter_node *node, char *buf)
                len = strfilter_node__sprint_pt(node->l, buf);
                if (len < 0)
                        return len;
+               __fallthrough;
        case '!':
                if (buf) {
                        *(buf + len++) = *node->p;
index d8dfaf64b32e19af441b121803e4b4418d31c69e..bddca519dd5854a0fd3319e85d54d7e638514b47 100644 (file)
@@ -21,6 +21,8 @@ s64 perf_atoll(const char *str)
                case 'b': case 'B':
                        if (*p)
                                goto out_err;
+
+                       __fallthrough;
                case '\0':
                        return length;
                default:
index dc93940de351540b89af3dfef487572e84d1f3f8..70e389bc4af71aa8f18ae67507fb65b5093a7f98 100644 (file)
@@ -1460,9 +1460,11 @@ int dso__load(struct dso *dso, struct map *map)
         * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
         */
        if (!dso->has_build_id &&
-           is_regular_file(dso->long_name) &&
-           filename__read_build_id(dso->long_name, build_id, BUILD_ID_SIZE) > 0)
+           is_regular_file(dso->long_name)) {
+           __symbol__join_symfs(name, PATH_MAX, dso->long_name);
+           if (filename__read_build_id(name, build_id, BUILD_ID_SIZE) > 0)
                dso__set_build_id(dso, build_id);
+       }
 
        /*
         * Iterate over candidate debug images.
index 7c6b33e8e2d24d250379700703088afb332a9fa9..63694e174e5c3c2ff9828fa14577f524cdb70f91 100644 (file)
@@ -21,7 +21,7 @@ size_t __symbol__fprintf_symname_offs(const struct symbol *sym,
        unsigned long offset;
        size_t length;
 
-       if (sym && sym->name) {
+       if (sym) {
                length = fprintf(fp, "%s", sym->name);
                if (al && print_offsets) {
                        if (al->addr < sym->end)
index f9eab200fd757ee0f6824cad0b2bbe9316afbec1..7c3fcc538a705f9df580c33b6444512547b3f453 100644 (file)
@@ -93,7 +93,7 @@ struct thread_map *thread_map__new_by_uid(uid_t uid)
 {
        DIR *proc;
        int max_threads = 32, items, i;
-       char path[256];
+       char path[NAME_MAX + 1 + 6];
        struct dirent *dirent, **namelist = NULL;
        struct thread_map *threads = thread_map__alloc(max_threads);
 
index d995743cb673e77524f0cd539d0cf0042b3fc363..e7d60d05596d2ff54aa6f8ec1d2a0b8afcd17546 100644 (file)
@@ -42,7 +42,7 @@
 #include "evsel.h"
 #include "debug.h"
 
-#define VERSION "0.5"
+#define VERSION "0.6"
 
 static int output_fd;
 
@@ -170,6 +170,12 @@ static bool name_in_tp_list(char *sys, struct tracepoint_path *tps)
        return false;
 }
 
+#define for_each_event(dir, dent, tps)                         \
+       while ((dent = readdir(dir)))                           \
+               if (dent->d_type == DT_DIR &&                   \
+                   (strcmp(dent->d_name, ".")) &&              \
+                   (strcmp(dent->d_name, "..")))               \
+
 static int copy_event_system(const char *sys, struct tracepoint_path *tps)
 {
        struct dirent *dent;
@@ -186,12 +192,10 @@ static int copy_event_system(const char *sys, struct tracepoint_path *tps)
                return -errno;
        }
 
-       while ((dent = readdir(dir))) {
-               if (dent->d_type != DT_DIR ||
-                   strcmp(dent->d_name, ".") == 0 ||
-                   strcmp(dent->d_name, "..") == 0 ||
-                   !name_in_tp_list(dent->d_name, tps))
+       for_each_event(dir, dent, tps) {
+               if (!name_in_tp_list(dent->d_name, tps))
                        continue;
+
                if (asprintf(&format, "%s/%s/format", sys, dent->d_name) < 0) {
                        err = -ENOMEM;
                        goto out;
@@ -210,12 +214,10 @@ static int copy_event_system(const char *sys, struct tracepoint_path *tps)
        }
 
        rewinddir(dir);
-       while ((dent = readdir(dir))) {
-               if (dent->d_type != DT_DIR ||
-                   strcmp(dent->d_name, ".") == 0 ||
-                   strcmp(dent->d_name, "..") == 0 ||
-                   !name_in_tp_list(dent->d_name, tps))
+       for_each_event(dir, dent, tps) {
+               if (!name_in_tp_list(dent->d_name, tps))
                        continue;
+
                if (asprintf(&format, "%s/%s/format", sys, dent->d_name) < 0) {
                        err = -ENOMEM;
                        goto out;
@@ -290,13 +292,11 @@ static int record_event_files(struct tracepoint_path *tps)
                goto out;
        }
 
-       while ((dent = readdir(dir))) {
-               if (dent->d_type != DT_DIR ||
-                   strcmp(dent->d_name, ".") == 0 ||
-                   strcmp(dent->d_name, "..") == 0 ||
-                   strcmp(dent->d_name, "ftrace") == 0 ||
+       for_each_event(dir, dent, tps) {
+               if (strcmp(dent->d_name, "ftrace") == 0 ||
                    !system_in_tp_list(dent->d_name, tps))
                        continue;
+
                count++;
        }
 
@@ -307,13 +307,11 @@ static int record_event_files(struct tracepoint_path *tps)
        }
 
        rewinddir(dir);
-       while ((dent = readdir(dir))) {
-               if (dent->d_type != DT_DIR ||
-                   strcmp(dent->d_name, ".") == 0 ||
-                   strcmp(dent->d_name, "..") == 0 ||
-                   strcmp(dent->d_name, "ftrace") == 0 ||
+       for_each_event(dir, dent, tps) {
+               if (strcmp(dent->d_name, "ftrace") == 0 ||
                    !system_in_tp_list(dent->d_name, tps))
                        continue;
+
                if (asprintf(&sys, "%s/%s", path, dent->d_name) < 0) {
                        err = -ENOMEM;
                        goto out;
@@ -379,6 +377,34 @@ out:
        return err;
 }
 
+static int record_saved_cmdline(void)
+{
+       unsigned int size;
+       char *path;
+       struct stat st;
+       int ret, err = 0;
+
+       path = get_tracing_file("saved_cmdlines");
+       if (!path) {
+               pr_debug("can't get tracing/saved_cmdline");
+               return -ENOMEM;
+       }
+
+       ret = stat(path, &st);
+       if (ret < 0) {
+               /* not found */
+               size = 0;
+               if (write(output_fd, &size, 8) != 8)
+                       err = -EIO;
+               goto out;
+       }
+       err = record_file(path, 8);
+
+out:
+       put_tracing_file(path);
+       return err;
+}
+
 static void
 put_tracepoints_path(struct tracepoint_path *tps)
 {
@@ -539,6 +565,9 @@ struct tracing_data *tracing_data_get(struct list_head *pattrs,
        if (err)
                goto out;
        err = record_ftrace_printk();
+       if (err)
+               goto out;
+       err = record_saved_cmdline();
 
 out:
        /*
index 33b52eaa39db293b0de4d41c7312f88b39a2c4e2..de0078e21408149fcf1105e357a3d829343233ff 100644 (file)
@@ -160,6 +160,23 @@ void parse_ftrace_printk(struct pevent *pevent,
        }
 }
 
+void parse_saved_cmdline(struct pevent *pevent,
+                        char *file, unsigned int size __maybe_unused)
+{
+       char *comm;
+       char *line;
+       char *next = NULL;
+       int pid;
+
+       line = strtok_r(file, "\n", &next);
+       while (line) {
+               sscanf(line, "%d %ms", &pid, &comm);
+               pevent_register_comm(pevent, comm, pid);
+               free(comm);
+               line = strtok_r(NULL, "\n", &next);
+       }
+}
+
 int parse_ftrace_file(struct pevent *pevent, char *buf, unsigned long size)
 {
        return pevent_parse_event(pevent, buf, size, "ftrace");
index b67a0ccf5ab94991653f6666dd4766864d282b3b..27420159bf6944df76eb76a45943a4810e0ba972 100644 (file)
@@ -260,39 +260,53 @@ static int read_header_files(struct pevent *pevent)
 
 static int read_ftrace_file(struct pevent *pevent, unsigned long long size)
 {
+       int ret;
        char *buf;
 
        buf = malloc(size);
-       if (buf == NULL)
+       if (buf == NULL) {
+               pr_debug("memory allocation failure\n");
                return -1;
+       }
 
-       if (do_read(buf, size) < 0) {
-               free(buf);
-               return -1;
+       ret = do_read(buf, size);
+       if (ret < 0) {
+               pr_debug("error reading ftrace file.\n");
+               goto out;
        }
 
-       parse_ftrace_file(pevent, buf, size);
+       ret = parse_ftrace_file(pevent, buf, size);
+       if (ret < 0)
+               pr_debug("error parsing ftrace file.\n");
+out:
        free(buf);
-       return 0;
+       return ret;
 }
 
 static int read_event_file(struct pevent *pevent, char *sys,
                            unsigned long long size)
 {
+       int ret;
        char *buf;
 
        buf = malloc(size);
-       if (buf == NULL)
+       if (buf == NULL) {
+               pr_debug("memory allocation failure\n");
                return -1;
+       }
 
-       if (do_read(buf, size) < 0) {
+       ret = do_read(buf, size);
+       if (ret < 0) {
                free(buf);
-               return -1;
+               goto out;
        }
 
-       parse_event_file(pevent, buf, size, sys);
+       ret = parse_event_file(pevent, buf, size, sys);
+       if (ret < 0)
+               pr_debug("error parsing event file.\n");
+out:
        free(buf);
-       return 0;
+       return ret;
 }
 
 static int read_ftrace_files(struct pevent *pevent)
@@ -341,6 +355,36 @@ static int read_event_files(struct pevent *pevent)
        return 0;
 }
 
+static int read_saved_cmdline(struct pevent *pevent)
+{
+       unsigned long long size;
+       char *buf;
+       int ret;
+
+       /* it can have 0 size */
+       size = read8(pevent);
+       if (!size)
+               return 0;
+
+       buf = malloc(size + 1);
+       if (buf == NULL) {
+               pr_debug("memory allocation failure\n");
+               return -1;
+       }
+
+       ret = do_read(buf, size);
+       if (ret < 0) {
+               pr_debug("error reading saved cmdlines\n");
+               goto out;
+       }
+
+       parse_saved_cmdline(pevent, buf, size);
+       ret = 0;
+out:
+       free(buf);
+       return ret;
+}
+
 ssize_t trace_report(int fd, struct trace_event *tevent, bool __repipe)
 {
        char buf[BUFSIZ];
@@ -379,10 +423,11 @@ ssize_t trace_report(int fd, struct trace_event *tevent, bool __repipe)
                return -1;
        if (show_version)
                printf("version = %s\n", version);
-       free(version);
 
-       if (do_read(buf, 1) < 0)
+       if (do_read(buf, 1) < 0) {
+               free(version);
                return -1;
+       }
        file_bigendian = buf[0];
        host_bigendian = bigendian();
 
@@ -423,6 +468,11 @@ ssize_t trace_report(int fd, struct trace_event *tevent, bool __repipe)
        err = read_ftrace_printk(pevent);
        if (err)
                goto out;
+       if (atof(version) >= 0.6) {
+               err = read_saved_cmdline(pevent);
+               if (err)
+                       goto out;
+       }
 
        size = trace_data_size;
        repipe = false;
@@ -438,5 +488,6 @@ ssize_t trace_report(int fd, struct trace_event *tevent, bool __repipe)
 out:
        if (pevent)
                trace_event__cleanup(tevent);
+       free(version);
        return size;
 }
index b0af9c81bb0df292ff798b9012971ee347b2dc1f..1fbc044f9eb039c84d5fe76416575c2f7753a43b 100644 (file)
@@ -42,6 +42,7 @@ raw_field_value(struct event_format *event, const char *name, void *data);
 
 void parse_proc_kallsyms(struct pevent *pevent, char *file, unsigned int size);
 void parse_ftrace_printk(struct pevent *pevent, char *file, unsigned int size);
+void parse_saved_cmdline(struct pevent *pevent, char *file, unsigned int size);
 
 ssize_t trace_report(int fd, struct trace_event *tevent, bool repipe);
 
index 6fec84dff3f777ac51f4f7201e2fb97d849d2aaf..bfb9b7987692ee583104c54f7756c43ca13cf52e 100644 (file)
@@ -35,6 +35,7 @@
 #include "util.h"
 #include "debug.h"
 #include "asm/bug.h"
+#include "dso.h"
 
 extern int
 UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
@@ -297,15 +298,58 @@ static int read_unwind_spec_debug_frame(struct dso *dso,
        int fd;
        u64 ofs = dso->data.debug_frame_offset;
 
+       /* debug_frame can reside in:
+        *  - dso
+        *  - debug pointed by symsrc_filename
+        *  - gnu_debuglink, which doesn't necessarily
+        *    have to be pointed to by symsrc_filename
+        */
        if (ofs == 0) {
                fd = dso__data_get_fd(dso, machine);
-               if (fd < 0)
-                       return -EINVAL;
+               if (fd >= 0) {
+                       ofs = elf_section_offset(fd, ".debug_frame");
+                       dso__data_put_fd(dso);
+               }
+
+               if (ofs <= 0) {
+                       fd = open(dso->symsrc_filename, O_RDONLY);
+                       if (fd >= 0) {
+                               ofs = elf_section_offset(fd, ".debug_frame");
+                               close(fd);
+                       }
+               }
+
+               if (ofs <= 0) {
+                       char *debuglink = malloc(PATH_MAX);
+                       int ret = 0;
+
+                       ret = dso__read_binary_type_filename(
+                               dso, DSO_BINARY_TYPE__DEBUGLINK,
+                               machine->root_dir, debuglink, PATH_MAX);
+                       if (!ret) {
+                               fd = open(debuglink, O_RDONLY);
+                               if (fd >= 0) {
+                                       ofs = elf_section_offset(fd,
+                                                       ".debug_frame");
+                                       close(fd);
+                               }
+                       }
+                       if (ofs > 0) {
+                               if (dso->symsrc_filename != NULL) {
+                                       pr_warning(
+                                               "%s: overwrite symsrc(%s,%s)\n",
+                                                       __func__,
+                                                       dso->symsrc_filename,
+                                                       debuglink);
+                                       free(dso->symsrc_filename);
+                               }
+                               dso->symsrc_filename = debuglink;
+                       } else {
+                               free(debuglink);
+                       }
+               }
 
-               /* Check the .debug_frame section for unwinding info */
-               ofs = elf_section_offset(fd, ".debug_frame");
                dso->data.debug_frame_offset = ofs;
-               dso__data_put_fd(dso);
        }
 
        *offset = ofs;
index 9ddd98827d12dffa0b4482deb72be3749dc33145..d8b45cea54d08233c3d743cc38c6c0ef7af90328 100644 (file)
@@ -85,7 +85,7 @@ int mkdir_p(char *path, mode_t mode)
        return (stat(path, &st) && mkdir(path, mode)) ? -1 : 0;
 }
 
-int rm_rf(char *path)
+int rm_rf(const char *path)
 {
        DIR *dir;
        int ret = 0;
@@ -789,3 +789,16 @@ int is_printable_array(char *p, unsigned int len)
        }
        return 1;
 }
+
+int unit_number__scnprintf(char *buf, size_t size, u64 n)
+{
+       char unit[4] = "BKMG";
+       int i = 0;
+
+       while (((n / 1024) > 1) && (i < 3)) {
+               n /= 1024;
+               i++;
+       }
+
+       return scnprintf(buf, size, "%" PRIu64 "%c", n, unit[i]);
+}
index 1d639e38aa82d0b495183af4dd36a0fdf3f47542..c74708da857129356e2a308cd987dec34f371104 100644 (file)
@@ -209,7 +209,7 @@ static inline int sane_case(int x, int high)
 }
 
 int mkdir_p(char *path, mode_t mode);
-int rm_rf(char *path);
+int rm_rf(const char *path);
 struct strlist *lsdir(const char *name, bool (*filter)(const char *, struct dirent *));
 bool lsdir_no_dot_filter(const char *name, struct dirent *d);
 int copyfile(const char *from, const char *to);
@@ -363,4 +363,5 @@ int is_printable_array(char *p, unsigned int len);
 
 int timestamp__scnprintf_usec(u64 timestamp, char *buf, size_t sz);
 
+int unit_number__scnprintf(char *buf, size_t size, u64 n);
 #endif /* GIT_COMPAT_UTIL_H */
diff --git a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
new file mode 100755 (executable)
index 0000000..fd706ac
--- /dev/null
@@ -0,0 +1,569 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+""" This utility can be used to debug and tune the performance of the
+intel_pstate driver. This utility can be used in two ways:
+- If there is Linux trace file with pstate_sample events enabled, then
+this utility can parse the trace file and generate performance plots.
+- If user has not specified a trace file as input via command line parameters,
+then this utility enables and collects trace data for a user specified interval
+and generates performance plots.
+
+Prerequisites:
+    Python version 2.7.x
+    gnuplot 5.0 or higher
+    gnuplot-py 1.8
+    (Most of the distributions have these required packages. They may be called
+     gnuplot-py, python-gnuplot. )
+
+    HWP (Hardware P-States) is disabled
+    Kernel config for Linux trace is enabled
+
+    see print_help(): for Usage and Output details
+
+"""
+from __future__ import print_function
+from datetime import datetime
+import subprocess
+import os
+import time
+import re
+import sys
+import getopt
+import Gnuplot
+from numpy import *
+from decimal import *
+
+__author__ = "Srinivas Pandruvada"
+__copyright__ = " Copyright (c) 2017, Intel Corporation. "
+__license__ = "GPL version 2"
+
+
+MAX_CPUS = 256
+
+# Define the csv file columns
+C_COMM = 18
+C_GHZ = 17
+C_ELAPSED = 16
+C_SAMPLE = 15
+C_DURATION = 14
+C_LOAD = 13
+C_BOOST = 12
+C_FREQ = 11
+C_TSC = 10
+C_APERF = 9
+C_MPERF = 8
+C_TO = 7
+C_FROM = 6
+C_SCALED = 5
+C_CORE = 4
+C_USEC = 3
+C_SEC = 2
+C_CPU = 1
+
+global sample_num, last_sec_cpu, last_usec_cpu, start_time, testname
+
+# 11 digits covers uptime to 115 days
+getcontext().prec = 11
+
+sample_num =0
+last_sec_cpu = [0] * MAX_CPUS
+last_usec_cpu = [0] * MAX_CPUS
+
+def print_help():
+    print('intel_pstate_tracer.py:')
+    print('  Usage:')
+    print('    If the trace file is available, then to simply parse and plot, use (sudo not required):')
+    print('      ./intel_pstate_tracer.py [-c cpus] -t <trace_file> -n <test_name>')
+    print('    Or')
+    print('      ./intel_pstate_tracer.py [--cpu cpus] ---trace_file <trace_file> --name <test_name>')
+    print('    To generate trace file, parse and plot, use (sudo required):')
+    print('      sudo ./intel_pstate_tracer.py [-c cpus] -i <interval> -n <test_name>')
+    print('    Or')
+    print('      sudo ./intel_pstate_tracer.py [--cpu cpus] --interval <interval> --name <test_name>')
+    print('    Optional argument:')
+    print('      cpus:  comma separated list of CPUs')
+    print('  Output:')
+    print('    If not already present, creates a "results/test_name" folder in the current working directory with:')
+    print('      cpu.csv - comma seperated values file with trace contents and some additional calculations.')
+    print('      cpu???.csv - comma seperated values file for CPU number ???.')
+    print('      *.png - a variety of PNG format plot files created from the trace contents and the additional calculations.')
+    print('  Notes:')
+    print('    Avoid the use of _ (underscore) in test names, because in gnuplot it is a subscript directive.')
+    print('    Maximum number of CPUs is {0:d}. If there are more the script will abort with an error.'.format(MAX_CPUS))
+    print('    Off-line CPUs cause the script to list some warnings, and create some empty files. Use the CPU mask feature for a clean run.')
+    print('    Empty y range warnings for autoscaled plots can occur and can be ignored.')
+
+def plot_perf_busy_with_sample(cpu_index):
+    """ Plot method to per cpu information """
+
+    file_name = 'cpu{:0>3}.csv'.format(cpu_index)
+    if os.path.exists(file_name):
+        output_png = "cpu%03d_perf_busy_vs_samples.png" % cpu_index
+        g_plot = common_all_gnuplot_settings(output_png)
+        g_plot('set yrange [0:40]')
+        g_plot('set y2range [0:200]')
+        g_plot('set y2tics 0, 10')
+        g_plot('set title "{} : cpu perf busy vs. sample : CPU {:0>3} : {:%F %H:%M}"'.format(testname, cpu_index, datetime.now()))
+#       Override common
+        g_plot('set xlabel "Samples"')
+        g_plot('set ylabel "P-State"')
+        g_plot('set y2label "Scaled Busy/performance/io-busy(%)"')
+        set_4_plot_linestyles(g_plot)
+        g_plot('plot "' + file_name + '" using {:d}:{:d} with linespoints linestyle 1 axis x1y2 title "performance",\\'.format(C_SAMPLE, C_CORE))
+        g_plot('"' + file_name + '" using {:d}:{:d} with linespoints linestyle 2 axis x1y2 title "scaled-busy",\\'.format(C_SAMPLE, C_SCALED))
+        g_plot('"' + file_name + '" using {:d}:{:d} with linespoints linestyle 3 axis x1y2 title "io-boost",\\'.format(C_SAMPLE, C_BOOST))
+        g_plot('"' + file_name + '" using {:d}:{:d} with linespoints linestyle 4 axis x1y1 title "P-State"'.format(C_SAMPLE, C_TO))
+
+def plot_perf_busy(cpu_index):
+    """ Plot some per cpu information """
+
+    file_name = 'cpu{:0>3}.csv'.format(cpu_index)
+    if os.path.exists(file_name):
+        output_png = "cpu%03d_perf_busy.png" % cpu_index
+        g_plot = common_all_gnuplot_settings(output_png)
+        g_plot('set yrange [0:40]')
+        g_plot('set y2range [0:200]')
+        g_plot('set y2tics 0, 10')
+        g_plot('set title "{} : perf busy : CPU {:0>3} : {:%F %H:%M}"'.format(testname, cpu_index, datetime.now()))
+        g_plot('set ylabel "P-State"')
+        g_plot('set y2label "Scaled Busy/performance/io-busy(%)"')
+        set_4_plot_linestyles(g_plot)
+        g_plot('plot "' + file_name + '" using {:d}:{:d} with linespoints linestyle 1 axis x1y2 title "performance",\\'.format(C_ELAPSED, C_CORE))
+        g_plot('"' + file_name + '" using {:d}:{:d} with linespoints linestyle 2 axis x1y2 title "scaled-busy",\\'.format(C_ELAPSED, C_SCALED))
+        g_plot('"' + file_name + '" using {:d}:{:d} with linespoints linestyle 3 axis x1y2 title "io-boost",\\'.format(C_ELAPSED, C_BOOST))
+        g_plot('"' + file_name + '" using {:d}:{:d} with linespoints linestyle 4 axis x1y1 title "P-State"'.format(C_ELAPSED, C_TO))
+
+def plot_durations(cpu_index):
+    """ Plot per cpu durations """
+
+    file_name = 'cpu{:0>3}.csv'.format(cpu_index)
+    if os.path.exists(file_name):
+        output_png = "cpu%03d_durations.png" % cpu_index
+        g_plot = common_all_gnuplot_settings(output_png)
+#       Should autoscale be used here? Should seconds be used here?
+        g_plot('set yrange [0:5000]')
+        g_plot('set ytics 0, 500')
+        g_plot('set title "{} : durations : CPU {:0>3} : {:%F %H:%M}"'.format(testname, cpu_index, datetime.now()))
+        g_plot('set ylabel "Timer Duration (MilliSeconds)"')
+#       override common
+        g_plot('set key off')
+        set_4_plot_linestyles(g_plot)
+        g_plot('plot "' + file_name + '" using {:d}:{:d} with linespoints linestyle 1 axis x1y1'.format(C_ELAPSED, C_DURATION))
+
+def plot_loads(cpu_index):
+    """ Plot per cpu loads """
+
+    file_name = 'cpu{:0>3}.csv'.format(cpu_index)
+    if os.path.exists(file_name):
+        output_png = "cpu%03d_loads.png" % cpu_index
+        g_plot = common_all_gnuplot_settings(output_png)
+        g_plot('set yrange [0:100]')
+        g_plot('set ytics 0, 10')
+        g_plot('set title "{} : loads : CPU {:0>3} : {:%F %H:%M}"'.format(testname, cpu_index, datetime.now()))
+        g_plot('set ylabel "CPU load (percent)"')
+#       override common
+        g_plot('set key off')
+        set_4_plot_linestyles(g_plot)
+        g_plot('plot "' + file_name + '" using {:d}:{:d} with linespoints linestyle 1 axis x1y1'.format(C_ELAPSED, C_LOAD))
+
+def plot_pstate_cpu_with_sample():
+    """ Plot all cpu information """
+
+    if os.path.exists('cpu.csv'):
+        output_png = 'all_cpu_pstates_vs_samples.png'
+        g_plot = common_all_gnuplot_settings(output_png)
+        g_plot('set yrange [0:40]')
+#       override common
+        g_plot('set xlabel "Samples"')
+        g_plot('set ylabel "P-State"')
+        g_plot('set title "{} : cpu pstate vs. sample : {:%F %H:%M}"'.format(testname, datetime.now()))
+        title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
+        plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_SAMPLE, C_TO)
+        g_plot('title_list = "{}"'.format(title_list))
+        g_plot(plot_str)
+
+def plot_pstate_cpu():
+    """ Plot all cpu information from csv files """
+
+    output_png = 'all_cpu_pstates.png'
+    g_plot = common_all_gnuplot_settings(output_png)
+    g_plot('set yrange [0:40]')
+    g_plot('set ylabel "P-State"')
+    g_plot('set title "{} : cpu pstates : {:%F %H:%M}"'.format(testname, datetime.now()))
+
+#    the following command is really cool, but doesn't work with the CPU masking option because it aborts on the first missing file.
+#    plot_str = 'plot for [i=0:*] file=sprintf("cpu%03d.csv",i) title_s=sprintf("cpu%03d",i) file using 16:7 pt 7 ps 1 title title_s'
+#
+    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
+    plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_TO)
+    g_plot('title_list = "{}"'.format(title_list))
+    g_plot(plot_str)
+
+def plot_load_cpu():
+    """ Plot all cpu loads """
+
+    output_png = 'all_cpu_loads.png'
+    g_plot = common_all_gnuplot_settings(output_png)
+    g_plot('set yrange [0:100]')
+    g_plot('set ylabel "CPU load (percent)"')
+    g_plot('set title "{} : cpu loads : {:%F %H:%M}"'.format(testname, datetime.now()))
+
+    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
+    plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_LOAD)
+    g_plot('title_list = "{}"'.format(title_list))
+    g_plot(plot_str)
+
+def plot_frequency_cpu():
+    """ Plot all cpu frequencies """
+
+    output_png = 'all_cpu_frequencies.png'
+    g_plot = common_all_gnuplot_settings(output_png)
+    g_plot('set yrange [0:4]')
+    g_plot('set ylabel "CPU Frequency (GHz)"')
+    g_plot('set title "{} : cpu frequencies : {:%F %H:%M}"'.format(testname, datetime.now()))
+
+    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
+    plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_FREQ)
+    g_plot('title_list = "{}"'.format(title_list))
+    g_plot(plot_str)
+
+def plot_duration_cpu():
+    """ Plot all cpu durations """
+
+    output_png = 'all_cpu_durations.png'
+    g_plot = common_all_gnuplot_settings(output_png)
+    g_plot('set yrange [0:5000]')
+    g_plot('set ytics 0, 500')
+    g_plot('set ylabel "Timer Duration (MilliSeconds)"')
+    g_plot('set title "{} : cpu durations : {:%F %H:%M}"'.format(testname, datetime.now()))
+
+    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
+    plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_DURATION)
+    g_plot('title_list = "{}"'.format(title_list))
+    g_plot(plot_str)
+
+def plot_scaled_cpu():
+    """ Plot all cpu scaled busy """
+
+    output_png = 'all_cpu_scaled.png'
+    g_plot = common_all_gnuplot_settings(output_png)
+#   autoscale this one, no set y range
+    g_plot('set ylabel "Scaled Busy (Unitless)"')
+    g_plot('set title "{} : cpu scaled busy : {:%F %H:%M}"'.format(testname, datetime.now()))
+
+    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
+    plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_SCALED)
+    g_plot('title_list = "{}"'.format(title_list))
+    g_plot(plot_str)
+
+def plot_boost_cpu():
+    """ Plot all cpu IO Boosts """
+
+    output_png = 'all_cpu_boost.png'
+    g_plot = common_all_gnuplot_settings(output_png)
+    g_plot('set yrange [0:100]')
+    g_plot('set ylabel "CPU IO Boost (percent)"')
+    g_plot('set title "{} : cpu io boost : {:%F %H:%M}"'.format(testname, datetime.now()))
+
+    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
+    plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_BOOST)
+    g_plot('title_list = "{}"'.format(title_list))
+    g_plot(plot_str)
+
+def plot_ghz_cpu():
+    """ Plot all cpu tsc ghz """
+
+    output_png = 'all_cpu_ghz.png'
+    g_plot = common_all_gnuplot_settings(output_png)
+#   autoscale this one, no set y range
+    g_plot('set ylabel "TSC Frequency (GHz)"')
+    g_plot('set title "{} : cpu TSC Frequencies (Sanity check calculation) : {:%F %H:%M}"'.format(testname, datetime.now()))
+
+    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
+    plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_GHZ)
+    g_plot('title_list = "{}"'.format(title_list))
+    g_plot(plot_str)
+
+def common_all_gnuplot_settings(output_png):
+    """ common gnuplot settings for multiple CPUs one one graph. """
+
+    g_plot = common_gnuplot_settings()
+    g_plot('set output "' + output_png + '"')
+    return(g_plot)
+
+def common_gnuplot_settings():
+    """ common gnuplot settings. """
+
+    g_plot = Gnuplot.Gnuplot(persist=1)
+#   The following line is for rigor only. It seems to be assumed for .csv files
+    g_plot('set datafile separator \",\"')
+    g_plot('set ytics nomirror')
+    g_plot('set xtics nomirror')
+    g_plot('set xtics font ", 10"')
+    g_plot('set ytics font ", 10"')
+    g_plot('set tics out scale 1.0')
+    g_plot('set grid')
+    g_plot('set key out horiz')
+    g_plot('set key bot center')
+    g_plot('set key samplen 2 spacing .8 font ", 9"')
+    g_plot('set term png size 1200, 600')
+    g_plot('set title font ", 11"')
+    g_plot('set ylabel font ", 10"')
+    g_plot('set xlabel font ", 10"')
+    g_plot('set xlabel offset 0, 0.5')
+    g_plot('set xlabel "Elapsed Time (Seconds)"')
+    return(g_plot)
+
+def set_4_plot_linestyles(g_plot):
+    """ set the linestyles used for 4 plots in 1 graphs. """
+
+    g_plot('set style line 1 linetype 1 linecolor rgb "green" pointtype -1')
+    g_plot('set style line 2 linetype 1 linecolor rgb "red" pointtype -1')
+    g_plot('set style line 3 linetype 1 linecolor rgb "purple" pointtype -1')
+    g_plot('set style line 4 linetype 1 linecolor rgb "blue" pointtype -1')
+
+def store_csv(cpu_int, time_pre_dec, time_post_dec, core_busy, scaled, _from, _to, mperf, aperf, tsc, freq_ghz, io_boost, common_comm, load, duration_ms, sample_num, elapsed_time, tsc_ghz):
+    """ Store master csv file information """
+
+    global graph_data_present
+
+    if cpu_mask[cpu_int] == 0:
+        return
+
+    try:
+        f_handle = open('cpu.csv', 'a')
+        string_buffer = "CPU_%03u, %05u, %06u, %u, %u, %u, %u, %u, %u, %u, %.4f, %u, %.2f, %.3f, %u, %.3f, %.3f, %s\n" % (cpu_int, int(time_pre_dec), int(time_post_dec), int(core_busy), int(scaled), int(_from), int(_to), int(mperf), int(aperf), int(tsc), freq_ghz, int(io_boost), load, duration_ms, sample_num, elapsed_time, tsc_ghz, common_comm)
+        f_handle.write(string_buffer);
+        f_handle.close()
+    except:
+        print('IO error cpu.csv')
+        return
+
+    graph_data_present = True;
+
+def split_csv():
+    """ seperate the all csv file into per CPU csv files. """
+
+    global current_max_cpu
+
+    if os.path.exists('cpu.csv'):
+        for index in range(0, current_max_cpu + 1):
+            if cpu_mask[int(index)] != 0:
+                os.system('grep -m 1 common_cpu cpu.csv > cpu{:0>3}.csv'.format(index))
+                os.system('grep CPU_{:0>3} cpu.csv >> cpu{:0>3}.csv'.format(index, index))
+
+def cleanup_data_files():
+    """ clean up existing data files """
+
+    if os.path.exists('cpu.csv'):
+        os.remove('cpu.csv')
+    f_handle = open('cpu.csv', 'a')
+    f_handle.write('common_cpu, common_secs, common_usecs, core_busy, scaled_busy, from, to, mperf, aperf, tsc, freq, boost, load, duration_ms, sample_num, elapsed_time, tsc_ghz, common_comm')
+    f_handle.write('\n')
+    f_handle.close()
+
+def clear_trace_file():
+    """ Clear trace file """
+
+    try:
+        f_handle = open('/sys/kernel/debug/tracing/trace', 'w')
+        f_handle.close()
+    except:
+        print('IO error clearing trace file ')
+        quit()
+
+def enable_trace():
+    """ Enable trace """
+
+    try:
+       open('/sys/kernel/debug/tracing/events/power/pstate_sample/enable'
+                 , 'w').write("1")
+    except:
+        print('IO error enabling trace ')
+        quit()
+
+def disable_trace():
+    """ Disable trace """
+
+    try:
+       open('/sys/kernel/debug/tracing/events/power/pstate_sample/enable'
+                 , 'w').write("0")
+    except:
+        print('IO error disabling trace ')
+        quit()
+
+def set_trace_buffer_size():
+    """ Set trace buffer size """
+
+    try:
+       open('/sys/kernel/debug/tracing/buffer_size_kb'
+                 , 'w').write("10240")
+    except:
+        print('IO error setting trace buffer size ')
+        quit()
+
+def read_trace_data(filename):
+    """ Read and parse trace data """
+
+    global current_max_cpu
+    global sample_num, last_sec_cpu, last_usec_cpu, start_time
+
+    try:
+        data = open(filename, 'r').read()
+    except:
+        print('Error opening ', filename)
+        quit()
+
+    for line in data.splitlines():
+        search_obj = \
+            re.search(r'(^(.*?)\[)((\d+)[^\]])(.*?)(\d+)([.])(\d+)(.*?core_busy=)(\d+)(.*?scaled=)(\d+)(.*?from=)(\d+)(.*?to=)(\d+)(.*?mperf=)(\d+)(.*?aperf=)(\d+)(.*?tsc=)(\d+)(.*?freq=)(\d+)'
+                      , line)
+
+        if search_obj:
+            cpu = search_obj.group(3)
+            cpu_int = int(cpu)
+            cpu = str(cpu_int)
+
+            time_pre_dec = search_obj.group(6)
+            time_post_dec = search_obj.group(8)
+            core_busy = search_obj.group(10)
+            scaled = search_obj.group(12)
+            _from = search_obj.group(14)
+            _to = search_obj.group(16)
+            mperf = search_obj.group(18)
+            aperf = search_obj.group(20)
+            tsc = search_obj.group(22)
+            freq = search_obj.group(24)
+            common_comm = search_obj.group(2).replace(' ', '')
+
+            # Not all kernel versions have io_boost field
+            io_boost = '0'
+            search_obj = re.search(r'.*?io_boost=(\d+)', line)
+            if search_obj:
+                io_boost = search_obj.group(1)
+
+            if sample_num == 0 :
+                start_time = Decimal(time_pre_dec) + Decimal(time_post_dec) / Decimal(1000000)
+            sample_num += 1
+
+            if last_sec_cpu[cpu_int] == 0 :
+                last_sec_cpu[cpu_int] = time_pre_dec
+                last_usec_cpu[cpu_int] = time_post_dec
+            else :
+                duration_us = (int(time_pre_dec) - int(last_sec_cpu[cpu_int])) * 1000000 + (int(time_post_dec) - int(last_usec_cpu[cpu_int]))
+                duration_ms = Decimal(duration_us) / Decimal(1000)
+                last_sec_cpu[cpu_int] = time_pre_dec
+                last_usec_cpu[cpu_int] = time_post_dec
+                elapsed_time = Decimal(time_pre_dec) + Decimal(time_post_dec) / Decimal(1000000) - start_time
+                load = Decimal(int(mperf)*100)/ Decimal(tsc)
+                freq_ghz = Decimal(freq)/Decimal(1000000)
+#               Sanity check calculation, typically anomalies indicate missed samples
+#               However, check for 0 (should never occur)
+                tsc_ghz = Decimal(0)
+                if duration_ms != Decimal(0) :
+                    tsc_ghz = Decimal(tsc)/duration_ms/Decimal(1000000)
+                store_csv(cpu_int, time_pre_dec, time_post_dec, core_busy, scaled, _from, _to, mperf, aperf, tsc, freq_ghz, io_boost, common_comm, load, duration_ms, sample_num, elapsed_time, tsc_ghz)
+
+            if cpu_int > current_max_cpu:
+                current_max_cpu = cpu_int
+# End of for each trace line loop
+# Now separate the main overall csv file into per CPU csv files.
+    split_csv()
+
+interval = ""
+filename = ""
+cpu_list = ""
+testname = ""
+graph_data_present = False;
+
+valid1 = False
+valid2 = False
+
+cpu_mask = zeros((MAX_CPUS,), dtype=int)
+
+try:
+    opts, args = getopt.getopt(sys.argv[1:],"ht:i:c:n:",["help","trace_file=","interval=","cpu=","name="])
+except getopt.GetoptError:
+    print_help()
+    sys.exit(2)
+for opt, arg in opts:
+    if opt == '-h':
+        print()
+        sys.exit()
+    elif opt in ("-t", "--trace_file"):
+        valid1 = True
+        location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
+        filename = os.path.join(location, arg)
+    elif opt in ("-i", "--interval"):
+        valid1 = True
+        interval = arg
+    elif opt in ("-c", "--cpu"):
+        cpu_list = arg
+    elif opt in ("-n", "--name"):
+        valid2 = True
+        testname = arg
+
+if not (valid1 and valid2):
+    print_help()
+    sys.exit()
+
+if cpu_list:
+    for p in re.split("[,]", cpu_list):
+        if int(p) < MAX_CPUS :
+            cpu_mask[int(p)] = 1
+else:
+    for i in range (0, MAX_CPUS):
+        cpu_mask[i] = 1
+
+if not os.path.exists('results'):
+    os.mkdir('results')
+
+os.chdir('results')
+if os.path.exists(testname):
+    print('The test name directory already exists. Please provide a unique test name. Test re-run not supported, yet.')
+    sys.exit()
+os.mkdir(testname)
+os.chdir(testname)
+
+# Temporary (or perhaps not)
+cur_version = sys.version_info
+print('python version (should be >= 2.7):')
+print(cur_version)
+
+# Left as "cleanup" for potential future re-run ability.
+cleanup_data_files()
+
+if interval:
+    filename = "/sys/kernel/debug/tracing/trace"
+    clear_trace_file()
+    set_trace_buffer_size()
+    enable_trace()
+    print('Sleeping for ', interval, 'seconds')
+    time.sleep(int(interval))
+    disable_trace()
+
+current_max_cpu = 0
+
+read_trace_data(filename)
+
+if graph_data_present == False:
+    print('No valid data to plot')
+    sys.exit(2)
+
+for cpu_no in range(0, current_max_cpu + 1):
+    plot_perf_busy_with_sample(cpu_no)
+    plot_perf_busy(cpu_no)
+    plot_durations(cpu_no)
+    plot_loads(cpu_no)
+
+plot_pstate_cpu_with_sample()
+plot_pstate_cpu()
+plot_load_cpu()
+plot_frequency_cpu()
+plot_duration_cpu()
+plot_scaled_cpu()
+plot_boost_cpu()
+plot_ghz_cpu()
+
+os.chdir('../../')
index 8abbef164b4eeaf5fff90118f201dce07539ed81..621578aa12d6fe456134369b97968126c3a1981e 100644 (file)
@@ -32,7 +32,6 @@ EXTRA_WARNINGS += -Wold-style-definition
 EXTRA_WARNINGS += -Wpacked
 EXTRA_WARNINGS += -Wredundant-decls
 EXTRA_WARNINGS += -Wshadow
-EXTRA_WARNINGS += -Wstrict-aliasing=3
 EXTRA_WARNINGS += -Wstrict-prototypes
 EXTRA_WARNINGS += -Wswitch-default
 EXTRA_WARNINGS += -Wswitch-enum
@@ -40,12 +39,26 @@ EXTRA_WARNINGS += -Wundef
 EXTRA_WARNINGS += -Wwrite-strings
 EXTRA_WARNINGS += -Wformat
 
+ifneq ($(CC), clang)
+EXTRA_WARNINGS += -Wstrict-aliasing=3
+endif
+
 ifneq ($(findstring $(MAKEFLAGS), w),w)
 PRINT_DIR = --no-print-directory
 else
 NO_SUBDIR = :
 endif
 
+ifneq ($(filter 4.%,$(MAKE_VERSION)),)  # make-4
+ifneq ($(filter %s ,$(firstword x$(MAKEFLAGS))),)
+  silent=1
+endif
+else                                   # make-3.8x
+ifneq ($(filter s% -s%,$(MAKEFLAGS)),)
+  silent=1
+endif
+endif
+
 #
 # Define a callable command for descending to a new directory
 #
@@ -58,7 +71,7 @@ descend = \
 QUIET_SUBDIR0  = +$(MAKE) $(COMMAND_O) -C # space to separate -C and subdir
 QUIET_SUBDIR1  =
 
-ifneq ($(findstring $(MAKEFLAGS),s),s)
+ifneq ($(silent),1)
   ifneq ($(V),1)
        QUIET_CC       = @echo '  CC       '$@;
        QUIET_CC_FPIC  = @echo '  CC FPIC  '$@;
index b13fed534d761742700c21491887667f22a403c9..9f7bd1915c217bedc9b2ae51bb347b9a39bd1192 100644 (file)
@@ -67,21 +67,23 @@ static int map_equal(int lru_map, int expected)
        return map_subset(lru_map, expected) && map_subset(expected, lru_map);
 }
 
-static int sched_next_online(int pid, int next_to_try)
+static int sched_next_online(int pid, int *next_to_try)
 {
        cpu_set_t cpuset;
+       int next = *next_to_try;
+       int ret = -1;
 
-       if (next_to_try == nr_cpus)
-               return -1;
-
-       while (next_to_try < nr_cpus) {
+       while (next < nr_cpus) {
                CPU_ZERO(&cpuset);
-               CPU_SET(next_to_try++, &cpuset);
-               if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset))
+               CPU_SET(next++, &cpuset);
+               if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset)) {
+                       ret = 0;
                        break;
+               }
        }
 
-       return next_to_try;
+       *next_to_try = next;
+       return ret;
 }
 
 /* Size of the LRU amp is 2
@@ -96,11 +98,12 @@ static void test_lru_sanity0(int map_type, int map_flags)
 {
        unsigned long long key, value[nr_cpus];
        int lru_map_fd, expected_map_fd;
+       int next_cpu = 0;
 
        printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
               map_flags);
 
-       assert(sched_next_online(0, 0) != -1);
+       assert(sched_next_online(0, &next_cpu) != -1);
 
        if (map_flags & BPF_F_NO_COMMON_LRU)
                lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);
@@ -183,6 +186,7 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
        int lru_map_fd, expected_map_fd;
        unsigned int batch_size;
        unsigned int map_size;
+       int next_cpu = 0;
 
        if (map_flags & BPF_F_NO_COMMON_LRU)
                /* Ther percpu lru list (i.e each cpu has its own LRU
@@ -196,7 +200,7 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
        printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
               map_flags);
 
-       assert(sched_next_online(0, 0) != -1);
+       assert(sched_next_online(0, &next_cpu) != -1);
 
        batch_size = tgt_free / 2;
        assert(batch_size * 2 == tgt_free);
@@ -262,6 +266,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
        int lru_map_fd, expected_map_fd;
        unsigned int batch_size;
        unsigned int map_size;
+       int next_cpu = 0;
 
        if (map_flags & BPF_F_NO_COMMON_LRU)
                /* Ther percpu lru list (i.e each cpu has its own LRU
@@ -275,7 +280,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
        printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
               map_flags);
 
-       assert(sched_next_online(0, 0) != -1);
+       assert(sched_next_online(0, &next_cpu) != -1);
 
        batch_size = tgt_free / 2;
        assert(batch_size * 2 == tgt_free);
@@ -370,11 +375,12 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
        int lru_map_fd, expected_map_fd;
        unsigned int batch_size;
        unsigned int map_size;
+       int next_cpu = 0;
 
        printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
               map_flags);
 
-       assert(sched_next_online(0, 0) != -1);
+       assert(sched_next_online(0, &next_cpu) != -1);
 
        batch_size = tgt_free / 2;
        assert(batch_size * 2 == tgt_free);
@@ -430,11 +436,12 @@ static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free)
        int lru_map_fd, expected_map_fd;
        unsigned long long key, value[nr_cpus];
        unsigned long long end_key;
+       int next_cpu = 0;
 
        printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
               map_flags);
 
-       assert(sched_next_online(0, 0) != -1);
+       assert(sched_next_online(0, &next_cpu) != -1);
 
        if (map_flags & BPF_F_NO_COMMON_LRU)
                lru_map_fd = create_map(map_type, map_flags,
@@ -502,9 +509,8 @@ static void do_test_lru_sanity5(unsigned long long last_key, int map_fd)
 static void test_lru_sanity5(int map_type, int map_flags)
 {
        unsigned long long key, value[nr_cpus];
-       int next_sched_cpu = 0;
+       int next_cpu = 0;
        int map_fd;
-       int i;
 
        if (map_flags & BPF_F_NO_COMMON_LRU)
                return;
@@ -519,27 +525,20 @@ static void test_lru_sanity5(int map_type, int map_flags)
        key = 0;
        assert(!bpf_map_update(map_fd, &key, value, BPF_NOEXIST));
 
-       for (i = 0; i < nr_cpus; i++) {
+       while (sched_next_online(0, &next_cpu) != -1) {
                pid_t pid;
 
                pid = fork();
                if (pid == 0) {
-                       next_sched_cpu = sched_next_online(0, next_sched_cpu);
-                       if (next_sched_cpu != -1)
-                               do_test_lru_sanity5(key, map_fd);
+                       do_test_lru_sanity5(key, map_fd);
                        exit(0);
                } else if (pid == -1) {
-                       printf("couldn't spawn #%d process\n", i);
+                       printf("couldn't spawn process to test key:%llu\n",
+                              key);
                        exit(1);
                } else {
                        int status;
 
-                       /* It is mostly redundant and just allow the parent
-                        * process to update next_shced_cpu for the next child
-                        * process
-                        */
-                       next_sched_cpu = sched_next_online(pid, next_sched_cpu);
-
                        assert(waitpid(pid, &status, 0) == pid);
                        assert(status == 0);
                        key++;
@@ -547,6 +546,8 @@ static void test_lru_sanity5(int map_type, int map_flags)
        }
 
        close(map_fd);
+       /* At least one key should be tested */
+       assert(key > 0);
 
        printf("Pass\n");
 }
diff --git a/tools/testing/selftests/locking/ww_mutex.sh b/tools/testing/selftests/locking/ww_mutex.sh
new file mode 100644 (file)
index 0000000..6905da9
--- /dev/null
@@ -0,0 +1,10 @@
+#!/bin/sh
+# Runs API tests for struct ww_mutex (Wait/Wound mutexes)
+
+if /sbin/modprobe -q test-ww_mutex; then
+       /sbin/modprobe -q -r test-ww_mutex
+       echo "locking/ww_mutex: ok"
+else
+       echo "locking/ww_mutex: [FAIL]"
+       exit 1
+fi
index b9611c523723982e726548742b700d29ed0be7c4..41bae5824339453c7a0b3a9fc6db60ba98588bc5 100644 (file)
@@ -4,3 +4,4 @@ LOCK03
 LOCK04
 LOCK05
 LOCK06
+LOCK07
diff --git a/tools/testing/selftests/rcutorture/configs/lock/LOCK07 b/tools/testing/selftests/rcutorture/configs/lock/LOCK07
new file mode 100644 (file)
index 0000000..1d1da14
--- /dev/null
@@ -0,0 +1,6 @@
+CONFIG_SMP=y
+CONFIG_NR_CPUS=4
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
diff --git a/tools/testing/selftests/rcutorture/configs/lock/LOCK07.boot b/tools/testing/selftests/rcutorture/configs/lock/LOCK07.boot
new file mode 100644 (file)
index 0000000..97dadd1
--- /dev/null
@@ -0,0 +1 @@
+locktorture.torture_type=ww_mutex_lock
index f824b4c9d9d9132d9681c5fe51c26caa76d4d8de..d2d2a86139db1ccd661d0ed70fb9feeb353748aa 100644 (file)
@@ -1,5 +1,2 @@
 CONFIG_RCU_TORTURE_TEST=y
 CONFIG_PRINTK_TIME=y
-CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP=y
-CONFIG_RCU_TORTURE_TEST_SLOW_INIT=y
-CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT=y
index 0a63e073a00c7a3765bc3732b453bf8c0a19bf09..6db705e554874a2b70229e4383c0bae675d3cf51 100644 (file)
@@ -7,6 +7,7 @@ CONFIG_HZ_PERIODIC=n
 CONFIG_NO_HZ_IDLE=y
 CONFIG_NO_HZ_FULL=n
 CONFIG_RCU_TRACE=n
+#CHECK#CONFIG_RCU_STALL_COMMON=n
 CONFIG_DEBUG_LOCK_ALLOC=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
 CONFIG_PREEMPT_COUNT=n
index f1892e0371c954bd5cfc800a6b9cc87297e9433e..a59f7686e219f409d59f0b6068f5673bc07f049c 100644 (file)
@@ -8,7 +8,8 @@ CONFIG_NO_HZ_IDLE=n
 CONFIG_NO_HZ_FULL=n
 CONFIG_RCU_TRACE=y
 CONFIG_PROVE_LOCKING=y
+CONFIG_PROVE_RCU_REPEATEDLY=y
 #CHECK#CONFIG_PROVE_RCU=y
 CONFIG_DEBUG_LOCK_ALLOC=y
-CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
 CONFIG_PREEMPT_COUNT=y
index f572b873c6204327dc301f6c06b5134ba41ad022..359cb258f6399c4d7465d3d4fbeaa3531da02d9e 100644 (file)
@@ -16,3 +16,6 @@ CONFIG_DEBUG_LOCK_ALLOC=n
 CONFIG_RCU_BOOST=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
 CONFIG_RCU_EXPERT=y
+CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP=y
+CONFIG_RCU_TORTURE_TEST_SLOW_INIT=y
+CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT=y
index ef6a22c44dead76ba7f2d2e6ec2e257accd87d74..c1ab5926568b0010e78e430ddf8511cdfe480e5b 100644 (file)
@@ -20,3 +20,7 @@ CONFIG_PROVE_LOCKING=n
 CONFIG_RCU_BOOST=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
 CONFIG_RCU_EXPERT=y
+CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP=y
+CONFIG_RCU_TORTURE_TEST_SLOW_INIT=y
+CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
index 7a17c503b382cdca20dbca7ed0d5189842e75a68..3b93ee544e70a2a85408933e4a6e06f3165a478e 100644 (file)
@@ -17,3 +17,6 @@ CONFIG_RCU_BOOST=y
 CONFIG_RCU_KTHREAD_PRIO=2
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
 CONFIG_RCU_EXPERT=y
+CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP=y
+CONFIG_RCU_TORTURE_TEST_SLOW_INIT=y
+CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT=y
index 17cbe098b115ce9421a6930a3720d5a834cf07ca..5af758e783c744440ae3e758a63b80f4be2e2ef8 100644 (file)
@@ -19,3 +19,7 @@ CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
 CONFIG_RCU_EXPERT=y
+CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP=y
+CONFIG_RCU_TORTURE_TEST_SLOW_INIT=y
+CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT=y
+CONFIG_RCU_EQS_DEBUG=y
index 1257d3227b1e18a4e9863e223f0be509c5827528..d4cdc0d74e16885adb4d78e90267223f01c907d0 100644 (file)
@@ -19,3 +19,6 @@ CONFIG_PROVE_LOCKING=y
 #CHECK#CONFIG_PROVE_RCU=y
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
 CONFIG_RCU_EXPERT=y
+CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP=y
+CONFIG_RCU_TORTURE_TEST_SLOW_INIT=y
+CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT=y
index d3e456b74cbe24a1204f44ba65051b162779f2f8..4cb02bd28f0825a82a18a38bd509f33eb09d0941 100644 (file)
@@ -20,3 +20,6 @@ CONFIG_PROVE_LOCKING=y
 #CHECK#CONFIG_PROVE_RCU=y
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
 CONFIG_RCU_EXPERT=y
+CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP=y
+CONFIG_RCU_TORTURE_TEST_SLOW_INIT=y
+CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT=y
index 3956b4131f72b22bacbd6de9a3b2ed255aaa9387..b12a3ea1867ea9d60651a60e3f3c96e2c9e8d91b 100644 (file)
@@ -19,3 +19,6 @@ CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
 CONFIG_RCU_EXPERT=y
+CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP=y
+CONFIG_RCU_TORTURE_TEST_SLOW_INIT=y
+CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT=y
index bb9b0c1a23c238a454f8c8fb6fd09a0c4683ee9f..099cc63c6a3b49f3a2b21ef53abb61515bb47608 100644 (file)
@@ -17,8 +17,8 @@ CONFIG_RCU_FANOUT_LEAF=2
 CONFIG_RCU_NOCB_CPU=y
 CONFIG_RCU_NOCB_CPU_ALL=y
 CONFIG_DEBUG_LOCK_ALLOC=n
-CONFIG_PROVE_LOCKING=y
-#CHECK#CONFIG_PROVE_RCU=y
+CONFIG_PROVE_LOCKING=n
 CONFIG_RCU_BOOST=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
 CONFIG_RCU_EXPERT=y
+CONFIG_RCU_EQS_DEBUG=y
index 4e2b1893d40d0d8e11c47ac6400fdc8a9c707962..364801b1a230758ad1ca410e5d4923bcc9446dbf 100644 (file)
@@ -14,6 +14,7 @@ CONFIG_NO_HZ_FULL_SYSIDLE -- Do one.
 CONFIG_PREEMPT -- Do half.  (First three and #8.)
 CONFIG_PROVE_LOCKING -- Do several, covering CONFIG_DEBUG_LOCK_ALLOC=y and not.
 CONFIG_PROVE_RCU -- Hardwired to CONFIG_PROVE_LOCKING.
+CONFIG_PROVE_RCU_REPEATEDLY -- Do one.
 CONFIG_RCU_BOOST -- one of PREEMPT_RCU.
 CONFIG_RCU_KTHREAD_PRIO -- set to 2 for _BOOST testing.
 CONFIG_RCU_FANOUT -- Cover hierarchy, but overlap with others.
@@ -25,7 +26,12 @@ CONFIG_RCU_NOCB_CPU_NONE -- Do one.
 CONFIG_RCU_NOCB_CPU_ZERO -- Do one.
 CONFIG_RCU_TRACE -- Do half.
 CONFIG_SMP -- Need one !SMP for PREEMPT_RCU.
-!RCU_EXPERT -- Do a few, but these have to be vanilla configurations.
+CONFIG_RCU_EXPERT=n -- Do a few, but these have to be vanilla configurations.
+CONFIG_RCU_EQS_DEBUG -- Do at least one for CONFIG_NO_HZ_FULL and not.
+CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP -- Do for all but a couple TREE scenarios.
+CONFIG_RCU_TORTURE_TEST_SLOW_INIT -- Do for all but a couple TREE scenarios.
+CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT -- Do for all but a couple TREE scenarios.
+
 RCU-bh: Do one with PREEMPT and one with !PREEMPT.
 RCU-sched: Do one with PREEMPT but not BOOST.
 
@@ -72,7 +78,30 @@ CONFIG_RCU_TORTURE_TEST_RUNNABLE
 
        Always used in KVM testing.
 
+CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT_DELAY
+CONFIG_RCU_TORTURE_TEST_SLOW_INIT_DELAY
+CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP_DELAY
+
+       Inspection suffices, ignore.
+
 CONFIG_PREEMPT_RCU
 CONFIG_TREE_RCU
+CONFIG_TINY_RCU
+
+       These are controlled by CONFIG_PREEMPT and/or CONFIG_SMP.
+
+CONFIG_SPARSE_RCU_POINTER
+
+       Makes sense only for sparse runs, not for kernel builds.
+
+CONFIG_SRCU
+CONFIG_TASKS_RCU
+
+       Selected by CONFIG_RCU_TORTURE_TEST, so cannot disable.
+
+CONFIG_RCU_TRACE
+
+       Implied by CONFIG_RCU_TRACE for Tree RCU.
+
 
-       These are controlled by CONFIG_PREEMPT.
+boot parameters ignored: TBD
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/.gitignore b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/.gitignore
new file mode 100644 (file)
index 0000000..712a3d4
--- /dev/null
@@ -0,0 +1 @@
+srcu.c
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/Makefile b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/Makefile
new file mode 100644 (file)
index 0000000..16b0155
--- /dev/null
@@ -0,0 +1,16 @@
+all: srcu.c store_buffering
+
+LINUX_SOURCE = ../../../../../..
+
+modified_srcu_input = $(LINUX_SOURCE)/include/linux/srcu.h \
+                     $(LINUX_SOURCE)/kernel/rcu/srcu.c
+
+modified_srcu_output = include/linux/srcu.h srcu.c
+
+include/linux/srcu.h: srcu.c
+
+srcu.c: modify_srcu.awk Makefile $(modified_srcu_input)
+       awk -f modify_srcu.awk $(modified_srcu_input) $(modified_srcu_output)
+
+store_buffering:
+       @cd tests/store_buffering; make
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/delay.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/delay.h
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/export.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/export.h
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/mutex.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/mutex.h
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/percpu.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/percpu.h
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/preempt.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/preempt.h
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/rcupdate.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/rcupdate.h
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/sched.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/sched.h
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/smp.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/smp.h
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/workqueue.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/workqueue.h
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/uapi/linux/types.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/uapi/linux/types.h
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/.gitignore b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/.gitignore
new file mode 100644 (file)
index 0000000..1d016e6
--- /dev/null
@@ -0,0 +1 @@
+srcu.h
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/kconfig.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/kconfig.h
new file mode 100644 (file)
index 0000000..f2860dd
--- /dev/null
@@ -0,0 +1 @@
+#include <LINUX_SOURCE/linux/kconfig.h>
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h
new file mode 100644 (file)
index 0000000..4a3d538
--- /dev/null
@@ -0,0 +1,155 @@
+/*
+ * This header has been modified to remove definitions of types that
+ * are defined in standard userspace headers or are problematic for some
+ * other reason.
+ */
+
+#ifndef _LINUX_TYPES_H
+#define _LINUX_TYPES_H
+
+#define __EXPORTED_HEADERS__
+#include <uapi/linux/types.h>
+
+#ifndef __ASSEMBLY__
+
+#define DECLARE_BITMAP(name, bits) \
+       unsigned long name[BITS_TO_LONGS(bits)]
+
+typedef __u32 __kernel_dev_t;
+
+/* bsd */
+typedef unsigned char          u_char;
+typedef unsigned short         u_short;
+typedef unsigned int           u_int;
+typedef unsigned long          u_long;
+
+/* sysv */
+typedef unsigned char          unchar;
+typedef unsigned short         ushort;
+typedef unsigned int           uint;
+typedef unsigned long          ulong;
+
+#ifndef __BIT_TYPES_DEFINED__
+#define __BIT_TYPES_DEFINED__
+
+typedef                __u8            u_int8_t;
+typedef                __s8            int8_t;
+typedef                __u16           u_int16_t;
+typedef                __s16           int16_t;
+typedef                __u32           u_int32_t;
+typedef                __s32           int32_t;
+
+#endif /* !(__BIT_TYPES_DEFINED__) */
+
+typedef                __u8            uint8_t;
+typedef                __u16           uint16_t;
+typedef                __u32           uint32_t;
+
+/* this is a special 64bit data type that is 8-byte aligned */
+#define aligned_u64 __u64 __attribute__((aligned(8)))
+#define aligned_be64 __be64 __attribute__((aligned(8)))
+#define aligned_le64 __le64 __attribute__((aligned(8)))
+
+/**
+ * The type used for indexing onto a disc or disc partition.
+ *
+ * Linux always considers sectors to be 512 bytes long independently
+ * of the devices real block size.
+ *
+ * blkcnt_t is the type of the inode's block count.
+ */
+#ifdef CONFIG_LBDAF
+typedef u64 sector_t;
+#else
+typedef unsigned long sector_t;
+#endif
+
+/*
+ * The type of an index into the pagecache.
+ */
+#define pgoff_t unsigned long
+
+/*
+ * A dma_addr_t can hold any valid DMA address, i.e., any address returned
+ * by the DMA API.
+ *
+ * If the DMA API only uses 32-bit addresses, dma_addr_t need only be 32
+ * bits wide.  Bus addresses, e.g., PCI BARs, may be wider than 32 bits,
+ * but drivers do memory-mapped I/O to ioremapped kernel virtual addresses,
+ * so they don't care about the size of the actual bus addresses.
+ */
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+typedef u64 dma_addr_t;
+#else
+typedef u32 dma_addr_t;
+#endif
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+typedef u64 phys_addr_t;
+#else
+typedef u32 phys_addr_t;
+#endif
+
+typedef phys_addr_t resource_size_t;
+
+/*
+ * This type is the placeholder for a hardware interrupt number. It has to be
+ * big enough to enclose whatever representation is used by a given platform.
+ */
+typedef unsigned long irq_hw_number_t;
+
+typedef struct {
+       int counter;
+} atomic_t;
+
+#ifdef CONFIG_64BIT
+typedef struct {
+       long counter;
+} atomic64_t;
+#endif
+
+struct list_head {
+       struct list_head *next, *prev;
+};
+
+struct hlist_head {
+       struct hlist_node *first;
+};
+
+struct hlist_node {
+       struct hlist_node *next, **pprev;
+};
+
+/**
+ * struct callback_head - callback structure for use with RCU and task_work
+ * @next: next update requests in a list
+ * @func: actual update function to call after the grace period.
+ *
+ * The struct is aligned to size of pointer. On most architectures it happens
+ * naturally due to ABI requirements, but some architectures (like CRIS) have
+ * weird ABI and we need to ask it explicitly.
+ *
+ * The alignment is required to guarantee that bits 0 and 1 of @next will be
+ * clear under normal conditions -- as long as we use call_rcu(),
+ * call_rcu_bh(), call_rcu_sched(), or call_srcu() to queue callback.
+ *
+ * This guarantee is important for few reasons:
+ *  - future call_rcu_lazy() will make use of lower bits in the pointer;
+ *  - the structure shares storage space in struct page with @compound_head,
+ *    which encode PageTail() in bit 0. The guarantee is needed to avoid
+ *    false-positive PageTail().
+ */
+struct callback_head {
+       struct callback_head *next;
+       void (*func)(struct callback_head *head);
+} __attribute__((aligned(sizeof(void *))));
+#define rcu_head callback_head
+
+typedef void (*rcu_callback_t)(struct rcu_head *head);
+typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func);
+
+/* clocksource cycle base type */
+typedef u64 cycle_t;
+
+#endif /*  __ASSEMBLY__ */
+#endif /* _LINUX_TYPES_H */
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/modify_srcu.awk b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/modify_srcu.awk
new file mode 100755 (executable)
index 0000000..8ff8904
--- /dev/null
@@ -0,0 +1,375 @@
+#!/bin/awk -f
+
+# Modify SRCU for formal verification. The first argument should be srcu.h and
+# the second should be srcu.c. Outputs modified srcu.h and srcu.c into the
+# current directory.
+
+BEGIN {
+       if (ARGC != 5) {
+               print "Usage: input.h input.c output.h output.c" > "/dev/stderr";
+               exit 1;
+       }
+       h_output = ARGV[3];
+       c_output = ARGV[4];
+       ARGC = 3;
+
+       # Tokenize using FS and not RS as FS supports regular expressions. Each
+       # record is one line of source, except that backslashed lines are
+       # combined. Comments are treated as field separators, as are quotes.
+       quote_regexp="\"([^\\\\\"]|\\\\.)*\"";
+       comment_regexp="\\/\\*([^*]|\\*+[^*/])*\\*\\/|\\/\\/.*(\n|$)";
+       FS="([ \\\\\t\n\v\f;,.=(){}+*/<>&|^-]|\\[|\\]|" comment_regexp "|" quote_regexp ")+";
+
+       inside_srcu_struct = 0;
+       inside_srcu_init_def = 0;
+       srcu_init_param_name = "";
+       in_macro = 0;
+       brace_nesting = 0;
+       paren_nesting = 0;
+
+       # Allow the manipulation of the last field separator after it has
+       # been seen.
+       last_fs = "";
+       # Whether the last field separator was intended to be output.
+       last_fs_print = 0;
+
+       # rcu_batches stores the initialization for each instance of struct
+       # rcu_batch
+
+       in_comment = 0;
+
+       outputfile = "";
+}
+
+{
+       prev_outputfile = outputfile;
+       if (FILENAME ~ /\.h$/) {
+               outputfile = h_output;
+               if (FNR != NR) {
+                       print "Incorrect file order" > "/dev/stderr";
+                       exit 1;
+               }
+       }
+       else
+               outputfile = c_output;
+
+       if (prev_outputfile && outputfile != prev_outputfile) {
+               new_outputfile = outputfile;
+               outputfile = prev_outputfile;
+               update_fieldsep("", 0);
+               outputfile = new_outputfile;
+       }
+}
+
+# Combine the next line into $0.
+function combine_line() {
+       ret = getline next_line;
+       if (ret == 0) {
+               # Don't allow two consecutive getlines at the end of the file
+               if (eof_found) {
+                       print "Error: expected more input." > "/dev/stderr";
+                       exit 1;
+               } else {
+                       eof_found = 1;
+               }
+       } else if (ret == -1) {
+               print "Error reading next line of file" FILENAME > "/dev/stderr";
+               exit 1;
+       }
+       $0 = $0 "\n" next_line;
+}
+
+# Combine backslashed lines and multiline comments.
+function combine_backslashes() {
+       while (/\\$|\/\*([^*]|\*+[^*\/])*\**$/) {
+               combine_line();
+       }
+}
+
+function read_line() {
+       combine_line();
+       combine_backslashes();
+}
+
+# Print out field separators and update variables that depend on them. Only
+# print if p is true. Call with sep="" and p=0 to print out the last field
+# separator.
+function update_fieldsep(sep, p) {
+       # Count braces
+       sep_tmp = sep;
+       gsub(quote_regexp "|" comment_regexp, "", sep_tmp);
+       while (1)
+       {
+               if (sub("[^{}()]*\\{", "", sep_tmp)) {
+                       brace_nesting++;
+                       continue;
+               }
+               if (sub("[^{}()]*\\}", "", sep_tmp)) {
+                       brace_nesting--;
+                       if (brace_nesting < 0) {
+                               print "Unbalanced braces!" > "/dev/stderr";
+                               exit 1;
+                       }
+                       continue;
+               }
+               if (sub("[^{}()]*\\(", "", sep_tmp)) {
+                       paren_nesting++;
+                       continue;
+               }
+               if (sub("[^{}()]*\\)", "", sep_tmp)) {
+                       paren_nesting--;
+                       if (paren_nesting < 0) {
+                               print "Unbalanced parenthesis!" > "/dev/stderr";
+                               exit 1;
+                       }
+                       continue;
+               }
+
+               break;
+       }
+
+       if (last_fs_print)
+               printf("%s", last_fs) > outputfile;
+       last_fs = sep;
+       last_fs_print = p;
+}
+
+# Shifts the fields down by n positions. Calls next if there are no more. If p
+# is true then print out field separators.
+function shift_fields(n, p) {
+       do {
+               if (match($0, FS) > 0) {
+                       update_fieldsep(substr($0, RSTART, RLENGTH), p);
+                       if (RSTART + RLENGTH <= length())
+                               $0 = substr($0, RSTART + RLENGTH);
+                       else
+                               $0 = "";
+               } else {
+                       update_fieldsep("", 0);
+                       print "" > outputfile;
+                       next;
+               }
+       } while (--n > 0);
+}
+
+# Shifts and prints the first n fields.
+function print_fields(n) {
+       do {
+               update_fieldsep("", 0);
+               printf("%s", $1) > outputfile;
+               shift_fields(1, 1);
+       } while (--n > 0);
+}
+
+{
+       combine_backslashes();
+}
+
+# Print leading FS
+{
+       if (match($0, "^(" FS ")+") > 0) {
+               update_fieldsep(substr($0, RSTART, RLENGTH), 1);
+               if (RSTART + RLENGTH <= length())
+                       $0 = substr($0, RSTART + RLENGTH);
+               else
+                       $0 = "";
+       }
+}
+
+# Parse the line.
+{
+       while (NF > 0) {
+               if ($1 == "struct" && NF < 3) {
+                       read_line();
+                       continue;
+               }
+
+               if (FILENAME ~ /\.h$/ && !inside_srcu_struct &&
+                   brace_nesting == 0 && paren_nesting == 0 &&
+                   $1 == "struct" && $2 == "srcu_struct" &&
+                   $0 ~ "^struct(" FS ")+srcu_struct(" FS ")+\\{") {
+                       inside_srcu_struct = 1;
+                       print_fields(2);
+                       continue;
+               }
+               if (inside_srcu_struct && brace_nesting == 0 &&
+                   paren_nesting == 0) {
+                       inside_srcu_struct = 0;
+                       update_fieldsep("", 0);
+                       for (name in rcu_batches)
+                               print "extern struct rcu_batch " name ";" > outputfile;
+               }
+
+               if (inside_srcu_struct && $1 == "struct" && $2 == "rcu_batch") {
+                       # Move rcu_batches outside of the struct.
+                       rcu_batches[$3] = "";
+                       shift_fields(3, 1);
+                       sub(/;[[:space:]]*$/, "", last_fs);
+                       continue;
+               }
+
+               if (FILENAME ~ /\.h$/ && !inside_srcu_init_def &&
+                   $1 == "#define" && $2 == "__SRCU_STRUCT_INIT") {
+                       inside_srcu_init_def = 1;
+                       srcu_init_param_name = $3;
+                       in_macro = 1;
+                       print_fields(3);
+                       continue;
+               }
+               if (inside_srcu_init_def && brace_nesting == 0 &&
+                   paren_nesting == 0) {
+                       inside_srcu_init_def = 0;
+                       in_macro = 0;
+                       continue;
+               }
+
+               if (inside_srcu_init_def && brace_nesting == 1 &&
+                   paren_nesting == 0 && last_fs ~ /\.[[:space:]]*$/ &&
+                   $1 ~ /^[[:alnum:]_]+$/) {
+                       name = $1;
+                       if (name in rcu_batches) {
+                               # Remove the dot.
+                               sub(/\.[[:space:]]*$/, "", last_fs);
+
+                               old_record = $0;
+                               do
+                                       shift_fields(1, 0);
+                               while (last_fs !~ /,/ || paren_nesting > 0);
+                               end_loc = length(old_record) - length($0);
+                               end_loc += index(last_fs, ",") - length(last_fs);
+
+                               last_fs = substr(last_fs, index(last_fs, ",") + 1);
+                               last_fs_print = 1;
+
+                               match(old_record, "^"name"("FS")+=");
+                               start_loc = RSTART + RLENGTH;
+
+                               len = end_loc - start_loc;
+                               initializer = substr(old_record, start_loc, len);
+                               gsub(srcu_init_param_name "\\.", "", initializer);
+                               rcu_batches[name] = initializer;
+                               continue;
+                       }
+               }
+
+               # Don't include a nonexistent file
+               if (!in_macro && $1 == "#include" && /^#include[[:space:]]+"rcu\.h"/) {
+                       update_fieldsep("", 0);
+                       next;
+               }
+
+               # Ignore most preprocessor stuff.
+               if (!in_macro && $1 ~ /#/) {
+                       break;
+               }
+
+               if (brace_nesting > 0 && $1 ~ "^[[:alnum:]_]+$" && NF < 2) {
+                       read_line();
+                       continue;
+               }
+               if (brace_nesting > 0 &&
+                   $0 ~ "^[[:alnum:]_]+[[:space:]]*(\\.|->)[[:space:]]*[[:alnum:]_]+" &&
+                   $2 in rcu_batches) {
+                       # Make uses of rcu_batches global. Somewhat unreliable.
+                       shift_fields(1, 0);
+                       print_fields(1);
+                       continue;
+               }
+
+               if ($1 == "static" && NF < 3) {
+                       read_line();
+                       continue;
+               }
+               if ($1 == "static" && ($2 == "bool" && $3 == "try_check_zero" ||
+                                      $2 == "void" && $3 == "srcu_flip")) {
+                       shift_fields(1, 1);
+                       print_fields(2);
+                       continue;
+               }
+
+               # Distinguish between read-side and write-side memory barriers.
+               if ($1 == "smp_mb" && NF < 2) {
+                       read_line();
+                       continue;
+               }
+               if (match($0, /^smp_mb[[:space:]();\/*]*[[:alnum:]]/)) {
+                       barrier_letter = substr($0, RLENGTH, 1);
+                       if (barrier_letter ~ /A|D/)
+                               new_barrier_name = "sync_smp_mb";
+                       else if (barrier_letter ~ /B|C/)
+                               new_barrier_name = "rs_smp_mb";
+                       else {
+                               print "Unrecognized memory barrier." > "/dev/null";
+                               exit 1;
+                       }
+
+                       shift_fields(1, 1);
+                       printf("%s", new_barrier_name) > outputfile;
+                       continue;
+               }
+
+               # Skip definition of rcu_synchronize, since it is already
+               # defined in misc.h. Only present in old versions of srcu.
+               if (brace_nesting == 0 && paren_nesting == 0 &&
+                   $1 == "struct" && $2 == "rcu_synchronize" &&
+                   $0 ~ "^struct(" FS ")+rcu_synchronize(" FS ")+\\{") {
+                       shift_fields(2, 0);
+                       while (brace_nesting) {
+                               if (NF < 2)
+                                       read_line();
+                               shift_fields(1, 0);
+                       }
+               }
+
+               # Skip definition of wakeme_after_rcu for the same reason
+               if (brace_nesting == 0 && $1 == "static" && $2 == "void" &&
+                   $3 == "wakeme_after_rcu") {
+                       while (NF < 5)
+                               read_line();
+                       shift_fields(3, 0);
+                       do {
+                               while (NF < 3)
+                                       read_line();
+                               shift_fields(1, 0);
+                       } while (paren_nesting || brace_nesting);
+               }
+
+               if ($1 ~ /^(unsigned|long)$/ && NF < 3) {
+                       read_line();
+                       continue;
+               }
+
+               # Give srcu_batches_completed the correct type for old SRCU.
+               if (brace_nesting == 0 && $1 == "long" &&
+                   $2 == "srcu_batches_completed") {
+                       update_fieldsep("", 0);
+                       printf("unsigned ") > outputfile;
+                       print_fields(2);
+                       continue;
+               }
+               if (brace_nesting == 0 && $1 == "unsigned" && $2 == "long" &&
+                   $3 == "srcu_batches_completed") {
+                       print_fields(3);
+                       continue;
+               }
+
+               # Just print out the input code by default.
+               print_fields(1);
+       }
+       update_fieldsep("", 0);
+       print > outputfile;
+       next;
+}
+
+# Final pass: after the whole input has been rewritten, emit one global
+# "struct rcu_batch" definition for every batch field whose initializer
+# was captured earlier while scanning the srcu_struct initializer.
+END {
+	update_fieldsep("", 0);
+
+	# A non-zero nesting count here means the input was malformed.
+	if (brace_nesting != 0) {
+		print "Unbalanced braces!" > "/dev/stderr";
+		exit 1;
+	}
+
+	# Define the rcu_batches
+	for (name in rcu_batches)
+		print "struct rcu_batch " name " = " rcu_batches[name] ";" > c_output;
+}
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/assume.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/assume.h
new file mode 100644 (file)
index 0000000..a649554
--- /dev/null
@@ -0,0 +1,16 @@
+#ifndef ASSUME_H
+#define ASSUME_H
+
+/* Provide an assumption macro that can be disabled for gcc. */
+/*
+ * RUN is defined when the harness is built with an ordinary compiler for
+ * native execution; in that mode assume() just evaluates its argument.
+ * Under CBMC, __CPROVER_assume(x) restricts the explored state space to
+ * executions in which x holds.
+ */
+#ifdef RUN
+#define assume(x) \
+	do { \
+		/* Evaluate x to suppress warnings. */ \
+		(void) (x); \
+	} while (0)
+
+#else
+#define assume(x) __CPROVER_assume(x)
+#endif
+
+#endif
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h
new file mode 100644 (file)
index 0000000..6687acc
--- /dev/null
@@ -0,0 +1,41 @@
+#ifndef BARRIERS_H
+#define BARRIERS_H
+
+/* Compiler-only barrier: stops the compiler reordering across it. */
+#define barrier() __asm__ __volatile__("" : : : "memory")
+
+#ifdef RUN
+#define smp_mb() __sync_synchronize()
+#define smp_mb__after_unlock_lock() __sync_synchronize()
+#else
+/*
+ * Copied from CBMC's implementation of __sync_synchronize(), which
+ * seems to be disabled by default.
+ */
+#define smp_mb() __CPROVER_fence("WWfence", "RRfence", "RWfence", "WRfence", \
+                                "WWcumul", "RRcumul", "RWcumul", "WRcumul")
+#define smp_mb__after_unlock_lock() __CPROVER_fence("WWfence", "RRfence", "RWfence", "WRfence", \
+                                   "WWcumul", "RRcumul", "RWcumul", "WRcumul")
+#endif
+
+/*
+ * Allow memory barriers to be disabled in either the read or write side
+ * of SRCU individually.
+ */
+
+/*
+ * The SRCU source is mechanically rewritten (see modify_srcu.awk) to call
+ * sync_smp_mb() on the update side and rs_smp_mb() on the read side, so
+ * each side's barriers can be compiled out independently.
+ */
+#ifndef NO_SYNC_SMP_MB
+#define sync_smp_mb() smp_mb()
+#else
+#define sync_smp_mb() do {} while (0)
+#endif
+
+#ifndef NO_READ_SIDE_SMP_MB
+#define rs_smp_mb() smp_mb()
+#else
+#define rs_smp_mb() do {} while (0)
+#endif
+
+/* Volatile accessors standing in for the kernel's ONCE macros. */
+#define ACCESS_ONCE(x) (*(volatile typeof(x) *) &(x))
+#define READ_ONCE(x) ACCESS_ONCE(x)
+#define WRITE_ONCE(x, val) (ACCESS_ONCE(x) = (val))
+
+#endif
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/bug_on.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/bug_on.h
new file mode 100644 (file)
index 0000000..2a80e91
--- /dev/null
@@ -0,0 +1,13 @@
+#ifndef BUG_ON_H
+#define BUG_ON_H
+
+#include <assert.h>
+
+/* Any path reaching BUG() is a verification failure (assert fires). */
+#define BUG() assert(0)
+#define BUG_ON(x) assert(!(x))
+
+/* Does it make sense to treat warnings as errors? */
+#define WARN() BUG()
+/* Asserts !(x); the comma expression then evaluates to false. */
+#define WARN_ON(x) (BUG_ON(x), false)
+
+#endif
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/combined_source.c b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/combined_source.c
new file mode 100644 (file)
index 0000000..29eb5d2
--- /dev/null
@@ -0,0 +1,13 @@
+#include <config.h>
+
+/* Include all source files. */
+
+#include "include_srcu.c"
+
+#include "preempt.c"
+#include "misc.c"
+
+/* Used by test.c files */
+#include <pthread.h>
+#include <stdlib.h>
+#include <linux/srcu.h>
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/config.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/config.h
new file mode 100644 (file)
index 0000000..a60038a
--- /dev/null
@@ -0,0 +1,27 @@
+/* "Cheater" definitions based on restricted Kconfig choices. */
+
+/* Force a plain TREE_RCU configuration; disable every optional feature. */
+#undef CONFIG_TINY_RCU
+#undef __CHECKER__
+#undef CONFIG_DEBUG_LOCK_ALLOC
+#undef CONFIG_DEBUG_OBJECTS_RCU_HEAD
+#undef CONFIG_HOTPLUG_CPU
+#undef CONFIG_MODULES
+#undef CONFIG_NO_HZ_FULL_SYSIDLE
+#undef CONFIG_PREEMPT_COUNT
+#undef CONFIG_PREEMPT_RCU
+#undef CONFIG_PROVE_RCU
+#undef CONFIG_RCU_NOCB_CPU
+#undef CONFIG_RCU_NOCB_CPU_ALL
+#undef CONFIG_RCU_STALL_COMMON
+#undef CONFIG_RCU_TRACE
+#undef CONFIG_RCU_USER_QS
+#undef CONFIG_TASKS_RCU
+#define CONFIG_TREE_RCU
+
+#define CONFIG_GENERIC_ATOMIC64
+
+/* NR_CPUS is assumed to be supplied by the build - TODO confirm where. */
+#if NR_CPUS > 1
+#define CONFIG_SMP
+#else
+#undef CONFIG_SMP
+#endif
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/include_srcu.c b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/include_srcu.c
new file mode 100644 (file)
index 0000000..5ec582a
--- /dev/null
@@ -0,0 +1,31 @@
+#include <config.h>
+
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <stddef.h>
+#include <string.h>
+#include <sys/types.h>
+
+#include "int_typedefs.h"
+
+#include "barriers.h"
+#include "bug_on.h"
+#include "locks.h"
+#include "misc.h"
+#include "preempt.h"
+#include "percpu.h"
+#include "workqueues.h"
+
+/*
+ * When the simplified synchronize_srcu() is requested, rename the real
+ * implementation in srcu.c out of the way so that simple_sync_srcu.c
+ * can provide synchronize_srcu() itself.
+ */
+#ifdef USE_SIMPLE_SYNC_SRCU
+#define synchronize_srcu(sp) synchronize_srcu_original(sp)
+#endif
+
+#include <srcu.c>
+
+#ifdef USE_SIMPLE_SYNC_SRCU
+#undef synchronize_srcu
+
+#include "simple_sync_srcu.c"
+#endif
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/int_typedefs.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/int_typedefs.h
new file mode 100644 (file)
index 0000000..3aad639
--- /dev/null
@@ -0,0 +1,33 @@
+#ifndef INT_TYPEDEFS_H
+#define INT_TYPEDEFS_H
+
+#include <inttypes.h>
+
+/* Kernel-style fixed-width integer names mapped onto <inttypes.h> types. */
+typedef int8_t s8;
+typedef uint8_t u8;
+typedef int16_t s16;
+typedef uint16_t u16;
+typedef int32_t s32;
+typedef uint32_t u32;
+typedef int64_t s64;
+typedef uint64_t u64;
+
+/* Double-underscore variants used by uapi-style kernel headers. */
+typedef int8_t __s8;
+typedef uint8_t __u8;
+typedef int16_t __s16;
+typedef uint16_t __u16;
+typedef int32_t __s32;
+typedef uint32_t __u32;
+typedef int64_t __s64;
+typedef uint64_t __u64;
+
+/* Constant-suffix macros mirroring INTn_C()/UINTn_C(). */
+#define S8_C(x) INT8_C(x)
+#define U8_C(x) UINT8_C(x)
+#define S16_C(x) INT16_C(x)
+#define U16_C(x) UINT16_C(x)
+#define S32_C(x) INT32_C(x)
+#define U32_C(x) UINT32_C(x)
+#define S64_C(x) INT64_C(x)
+#define U64_C(x) UINT64_C(x)
+
+#endif
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h
new file mode 100644 (file)
index 0000000..3560046
--- /dev/null
@@ -0,0 +1,220 @@
+#ifndef LOCKS_H
+#define LOCKS_H
+
+#include <limits.h>
+#include <pthread.h>
+#include <stdbool.h>
+
+#include "assume.h"
+#include "bug_on.h"
+#include "preempt.h"
+
+/* Declared but never defined: CBMC models the result as an arbitrary int. */
+int nondet_int(void);
+
+/* Stub out sparse lock annotations. */
+#define __acquire(x)
+#define __acquires(x)
+#define __release(x)
+#define __releases(x)
+
+/* Only use one lock mechanism. Select which one. */
+#ifdef PTHREAD_LOCK
+struct lock_impl {
+	pthread_mutex_t mutex;
+};
+
+static inline void lock_impl_lock(struct lock_impl *lock)
+{
+	BUG_ON(pthread_mutex_lock(&lock->mutex));
+}
+
+static inline void lock_impl_unlock(struct lock_impl *lock)
+{
+	BUG_ON(pthread_mutex_unlock(&lock->mutex));
+}
+
+/*
+ * EBUSY comes from <errno.h>, which the including .c files pull in first.
+ * NOTE(review): if assertions are compiled out (NDEBUG), BUG() returns and
+ * control falls off the end of this non-void function - confirm NDEBUG is
+ * never used with this harness.
+ */
+static inline bool lock_impl_trylock(struct lock_impl *lock)
+{
+	int err = pthread_mutex_trylock(&lock->mutex);
+
+	if (!err)
+		return true;
+	else if (err == EBUSY)
+		return false;
+	BUG();
+}
+
+static inline void lock_impl_init(struct lock_impl *lock)
+{
+	pthread_mutex_init(&lock->mutex, NULL);
+}
+
+#define LOCK_IMPL_INITIALIZER {.mutex = PTHREAD_MUTEX_INITIALIZER}
+
+#else /* !defined(PTHREAD_LOCK) */
+/* Spinlock that assumes that it always gets the lock immediately. */
+
+struct lock_impl {
+	bool locked;
+};
+
+static inline bool lock_impl_trylock(struct lock_impl *lock)
+{
+#ifdef RUN
+	/* TODO: Should this be a test and set? */
+	return __sync_bool_compare_and_swap(&lock->locked, false, true);
+#else
+	/* Atomic read-modify-write of the lock flag. */
+	__CPROVER_atomic_begin();
+	bool old_locked = lock->locked;
+	lock->locked = true;
+	__CPROVER_atomic_end();
+
+	/* Minimal barrier to prevent accesses leaking out of lock. */
+	__CPROVER_fence("RRfence", "RWfence");
+
+	return !old_locked;
+#endif
+}
+
+static inline void lock_impl_lock(struct lock_impl *lock)
+{
+	/*
+	 * CBMC doesn't support busy waiting, so just assume that the
+	 * lock is available.
+	 */
+	assume(lock_impl_trylock(lock));
+
+	/*
+	 * If the lock was already held by this thread then the assumption
+	 * is unsatisfiable (deadlock).
+	 */
+}
+
+static inline void lock_impl_unlock(struct lock_impl *lock)
+{
+#ifdef RUN
+	BUG_ON(!__sync_bool_compare_and_swap(&lock->locked, true, false));
+#else
+	/* Minimal barrier to prevent accesses leaking out of lock. */
+	__CPROVER_fence("RWfence", "WWfence");
+
+	__CPROVER_atomic_begin();
+	bool old_locked = lock->locked;
+	lock->locked = false;
+	__CPROVER_atomic_end();
+
+	/* Unlocking a lock that is not held is a failure. */
+	BUG_ON(!old_locked);
+#endif
+}
+
+static inline void lock_impl_init(struct lock_impl *lock)
+{
+	lock->locked = false;
+}
+
+#define LOCK_IMPL_INITIALIZER {.locked = false}
+
+#endif /* !defined(PTHREAD_LOCK) */
+
+/*
+ * Implement spinlocks using the lock mechanism. Wrap the lock to prevent mixing
+ * locks of different types.
+ */
+typedef struct {
+	struct lock_impl internal_lock;
+} spinlock_t;
+
+#define SPIN_LOCK_UNLOCKED {.internal_lock = LOCK_IMPL_INITIALIZER}
+#define __SPIN_LOCK_UNLOCKED(x) SPIN_LOCK_UNLOCKED
+#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
+
+static inline void spin_lock_init(spinlock_t *lock)
+{
+	lock_impl_init(&lock->internal_lock);
+}
+
+static inline void spin_lock(spinlock_t *lock)
+{
+	/*
+	 * Spin locks also need to be removed in order to eliminate all
+	 * memory barriers. They are only used by the write side anyway.
+	 */
+#ifndef NO_SYNC_SMP_MB
+	preempt_disable();
+	lock_impl_lock(&lock->internal_lock);
+#endif
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+#ifndef NO_SYNC_SMP_MB
+	lock_impl_unlock(&lock->internal_lock);
+	preempt_enable();
+#endif
+}
+
+/* Don't bother with interrupts */
+#define spin_lock_irq(lock) spin_lock(lock)
+#define spin_unlock_irq(lock) spin_unlock(lock)
+#define spin_lock_irqsave(lock, flags) spin_lock(lock)
+#define spin_unlock_irqrestore(lock, flags) spin_unlock(lock)
+
+/*
+ * This is supposed to return an int, but I think that a bool should work as
+ * well.
+ */
+static inline bool spin_trylock(spinlock_t *lock)
+{
+#ifndef NO_SYNC_SMP_MB
+	preempt_disable();
+	return lock_impl_trylock(&lock->internal_lock);
+#else
+	return true;
+#endif
+}
+
+struct completion {
+	/* Hopefuly this won't overflow. */
+	unsigned int count;
+};
+
+#define COMPLETION_INITIALIZER(x) {.count = 0}
+#define DECLARE_COMPLETION(x) struct completion x = COMPLETION_INITIALIZER(x)
+#define DECLARE_COMPLETION_ONSTACK(x) DECLARE_COMPLETION(x)
+
+static inline void init_completion(struct completion *c)
+{
+	c->count = 0;
+}
+
+static inline void wait_for_completion(struct completion *c)
+{
+	unsigned int prev_count = __sync_fetch_and_sub(&c->count, 1);
+
+	/* Only continue on executions in which a complete() preceded us. */
+	assume(prev_count);
+}
+
+static inline void complete(struct completion *c)
+{
+	unsigned int prev_count = __sync_fetch_and_add(&c->count, 1);
+
+	/* Guard against the counter wrapping. */
+	BUG_ON(prev_count == UINT_MAX);
+}
+
+/* This function probably isn't very useful for CBMC. */
+/* NOTE(review): also falls off the end after BUG() if NDEBUG is set. */
+static inline bool try_wait_for_completion(struct completion *c)
+{
+	BUG();
+}
+
+static inline bool completion_done(struct completion *c)
+{
+	return c->count;
+}
+
+/* TODO: Implement complete_all */
+static inline void complete_all(struct completion *c)
+{
+	BUG();
+}
+
+#endif
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/misc.c b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/misc.c
new file mode 100644 (file)
index 0000000..ca892e3
--- /dev/null
@@ -0,0 +1,11 @@
+#include <config.h>
+
+#include "misc.h"
+#include "bug_on.h"
+
+struct rcu_head;
+
+/*
+ * Stub for the kernel callback: the harness never expects it to be
+ * invoked, so any call is treated as a verification failure.
+ */
+void wakeme_after_rcu(struct rcu_head *head)
+{
+	BUG();
+}
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/misc.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/misc.h
new file mode 100644 (file)
index 0000000..aca5003
--- /dev/null
@@ -0,0 +1,58 @@
+#ifndef MISC_H
+#define MISC_H
+
+#include "assume.h"
+#include "int_typedefs.h"
+#include "locks.h"
+
+#include <linux/types.h>
+
+/* Probably won't need to deal with bottom halves. */
+static inline void local_bh_disable(void) {}
+static inline void local_bh_enable(void) {}
+
+/* Stub out module machinery. */
+#define MODULE_ALIAS(X)
+#define module_param(...)
+#define EXPORT_SYMBOL_GPL(x)
+
+/* Kernel container_of(): recover the enclosing struct from a member pointer. */
+#define container_of(ptr, type, member) ({                     \
+	const typeof(((type *)0)->member) *__mptr = (ptr);      \
+	(type *)((char *)__mptr - offsetof(type, member));      \
+})
+
+#ifndef USE_SIMPLE_SYNC_SRCU
+/* Abuse udelay to make sure that busy loops terminate. */
+#define udelay(x) assume(0)
+
+#else
+
+/* The simple custom synchronize_srcu is ok with try_check_zero failing. */
+#define udelay(x) do { } while (0)
+#endif
+
+/* Tracing is not modeled. */
+#define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
+	do { } while (0)
+
+#define notrace
+
+/* Avoid including rcupdate.h */
+struct rcu_synchronize {
+	struct rcu_head head;
+	struct completion completion;
+};
+
+/* Defined in misc.c; BUG()s if ever invoked. */
+void wakeme_after_rcu(struct rcu_head *head);
+
+/* Lockdep is not modeled. */
+#define rcu_lock_acquire(a) do { } while (0)
+#define rcu_lock_release(a) do { } while (0)
+#define rcu_lockdep_assert(c, s) do { } while (0)
+#define RCU_LOCKDEP_WARN(c, s) do { } while (0)
+
+/* Let CBMC non-deterministically choose switch between normal and expedited. */
+bool rcu_gp_is_normal(void);
+bool rcu_gp_is_expedited(void);
+
+/* Do the same for old versions of rcu. */
+#define rcu_expedited (rcu_gp_is_expedited())
+
+#endif
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/percpu.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/percpu.h
new file mode 100644 (file)
index 0000000..3de5a49
--- /dev/null
@@ -0,0 +1,92 @@
+#ifndef PERCPU_H
+#define PERCPU_H
+
+#include <stddef.h>
+#include "bug_on.h"
+#include "preempt.h"
+
+/* The address-space annotation is meaningless here. */
+#define __percpu
+
+/* Maximum size of any percpu data. */
+#define PERCPU_OFFSET (4 * sizeof(long))
+
+/* Ignore alignment, as CBMC doesn't care about false sharing. */
+#define alloc_percpu(type) __alloc_percpu(sizeof(type), 1)
+
+/* Dynamic per-cpu allocation is not modeled; any use fails verification. */
+static inline void *__alloc_percpu(size_t size, size_t align)
+{
+	BUG();
+	return NULL;
+}
+
+static inline void free_percpu(void *ptr)
+{
+	BUG();
+}
+
+/* Per-cpu copies are laid out contiguously, PERCPU_OFFSET bytes apart. */
+#define per_cpu_ptr(ptr, cpu) \
+	((typeof(ptr)) ((char *) (ptr) + PERCPU_OFFSET * cpu))
+
+#define __this_cpu_inc(pcp) __this_cpu_add(pcp, 1)
+#define __this_cpu_dec(pcp) __this_cpu_sub(pcp, 1)
+#define __this_cpu_sub(pcp, n) __this_cpu_add(pcp, -(typeof(pcp)) (n))
+
+#define this_cpu_inc(pcp) this_cpu_add(pcp, 1)
+#define this_cpu_dec(pcp) this_cpu_sub(pcp, 1)
+#define this_cpu_sub(pcp, n) this_cpu_add(pcp, -(typeof(pcp)) (n))
+
+/* Make CBMC use atomics to work around bug. */
+#ifdef RUN
+#define THIS_CPU_ADD_HELPER(ptr, x) (*(ptr) += (x))
+#else
+/*
+ * Split the atomic into a read and a write so that it has the least
+ * possible ordering.
+ */
+#define THIS_CPU_ADD_HELPER(ptr, x) \
+	do { \
+		typeof(ptr) this_cpu_add_helper_ptr = (ptr); \
+		typeof(ptr) this_cpu_add_helper_x = (x); \
+		typeof(*ptr) this_cpu_add_helper_temp; \
+		__CPROVER_atomic_begin(); \
+		this_cpu_add_helper_temp = *(this_cpu_add_helper_ptr); \
+		__CPROVER_atomic_end(); \
+		this_cpu_add_helper_temp += this_cpu_add_helper_x; \
+		__CPROVER_atomic_begin(); \
+		*(this_cpu_add_helper_ptr) = this_cpu_add_helper_temp; \
+		__CPROVER_atomic_end(); \
+	} while (0)
+#endif
+
+/*
+ * For some reason CBMC needs an atomic operation even though this is percpu
+ * data.
+ */
+/* Caller must already have preemption disabled (checked via BUG_ON). */
+#define __this_cpu_add(pcp, n) \
+	do { \
+		BUG_ON(preemptible()); \
+		THIS_CPU_ADD_HELPER(per_cpu_ptr(&(pcp), thread_cpu_id), \
+				    (typeof(pcp)) (n)); \
+	} while (0)
+
+/* Like __this_cpu_add() but pins the cpu itself via get_cpu()/put_cpu(). */
+#define this_cpu_add(pcp, n) \
+	do { \
+		int this_cpu_add_impl_cpu = get_cpu(); \
+		THIS_CPU_ADD_HELPER(per_cpu_ptr(&(pcp), this_cpu_add_impl_cpu), \
+				    (typeof(pcp)) (n)); \
+		put_cpu(); \
+	} while (0)
+
+/*
+ * This will cause a compiler warning because of the cast from char[][] to
+ * type*. This will cause a compile time error if type is too big.
+ */
+#define DEFINE_PER_CPU(type, name) \
+	char name[NR_CPUS][PERCPU_OFFSET]; \
+	typedef char percpu_too_big_##name \
+		[sizeof(type) > PERCPU_OFFSET ? -1 : 1]
+
+#define for_each_possible_cpu(cpu) \
+	for ((cpu) = 0; (cpu) < NR_CPUS; ++(cpu))
+
+#endif
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/preempt.c b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/preempt.c
new file mode 100644 (file)
index 0000000..4f1b068
--- /dev/null
@@ -0,0 +1,78 @@
+#include <config.h>
+
+#include "preempt.h"
+
+#include "assume.h"
+#include "locks.h"
+
+/* Support NR_CPUS of at most 64 */
+#define CPU_PREEMPTION_LOCKS_INIT0 LOCK_IMPL_INITIALIZER
+#define CPU_PREEMPTION_LOCKS_INIT1 \
+	CPU_PREEMPTION_LOCKS_INIT0, CPU_PREEMPTION_LOCKS_INIT0
+#define CPU_PREEMPTION_LOCKS_INIT2 \
+	CPU_PREEMPTION_LOCKS_INIT1, CPU_PREEMPTION_LOCKS_INIT1
+#define CPU_PREEMPTION_LOCKS_INIT3 \
+	CPU_PREEMPTION_LOCKS_INIT2, CPU_PREEMPTION_LOCKS_INIT2
+#define CPU_PREEMPTION_LOCKS_INIT4 \
+	CPU_PREEMPTION_LOCKS_INIT3, CPU_PREEMPTION_LOCKS_INIT3
+#define CPU_PREEMPTION_LOCKS_INIT5 \
+	CPU_PREEMPTION_LOCKS_INIT4, CPU_PREEMPTION_LOCKS_INIT4
+
+/*
+ * Simulate disabling preemption by locking a particular cpu. NR_CPUS
+ * should be the actual number of cpus, not just the maximum.
+ */
+/* Build an initializer with exactly NR_CPUS entries from power-of-two chunks. */
+struct lock_impl cpu_preemption_locks[NR_CPUS] = {
+	CPU_PREEMPTION_LOCKS_INIT0
+#if (NR_CPUS - 1) & 1
+	, CPU_PREEMPTION_LOCKS_INIT0
+#endif
+#if (NR_CPUS - 1) & 2
+	, CPU_PREEMPTION_LOCKS_INIT1
+#endif
+#if (NR_CPUS - 1) & 4
+	, CPU_PREEMPTION_LOCKS_INIT2
+#endif
+#if (NR_CPUS - 1) & 8
+	, CPU_PREEMPTION_LOCKS_INIT3
+#endif
+#if (NR_CPUS - 1) & 16
+	, CPU_PREEMPTION_LOCKS_INIT4
+#endif
+#if (NR_CPUS - 1) & 32
+	, CPU_PREEMPTION_LOCKS_INIT5
+#endif
+};
+
+#undef CPU_PREEMPTION_LOCKS_INIT0
+#undef CPU_PREEMPTION_LOCKS_INIT1
+#undef CPU_PREEMPTION_LOCKS_INIT2
+#undef CPU_PREEMPTION_LOCKS_INIT3
+#undef CPU_PREEMPTION_LOCKS_INIT4
+#undef CPU_PREEMPTION_LOCKS_INIT5
+
+/* See preempt.h: thread_cpu_id is only valid while preemption is disabled. */
+__thread int thread_cpu_id;
+__thread int preempt_disable_count;
+
+void preempt_disable(void)
+{
+	/* Guard against underflow and counter overflow. */
+	BUG_ON(preempt_disable_count < 0 || preempt_disable_count == INT_MAX);
+
+	/* Nested disables only bump the count. */
+	if (preempt_disable_count++)
+		return;
+
+	/* Outermost disable: nondeterministically pick a cpu and lock it. */
+	thread_cpu_id = nondet_int();
+	assume(thread_cpu_id >= 0);
+	assume(thread_cpu_id < NR_CPUS);
+	lock_impl_lock(&cpu_preemption_locks[thread_cpu_id]);
+}
+
+void preempt_enable(void)
+{
+	BUG_ON(preempt_disable_count < 1);
+
+	/* Only the outermost enable releases the cpu lock. */
+	if (--preempt_disable_count)
+		return;
+
+	lock_impl_unlock(&cpu_preemption_locks[thread_cpu_id]);
+}
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/preempt.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/preempt.h
new file mode 100644 (file)
index 0000000..2f95ee0
--- /dev/null
@@ -0,0 +1,58 @@
+#ifndef PREEMPT_H
+#define PREEMPT_H
+
+#include <stdbool.h>
+
+#include "bug_on.h"
+
+/* This flag contains garbage if preempt_disable_count is 0. */
+extern __thread int thread_cpu_id;
+
+/* Support recursive preemption disabling. */
+extern __thread int preempt_disable_count;
+
+void preempt_disable(void);
+void preempt_enable(void);
+
+/* Tracing is not modeled, so the *_notrace variants are plain wrappers. */
+static inline void preempt_disable_notrace(void)
+{
+	preempt_disable();
+}
+
+static inline void preempt_enable_no_resched(void)
+{
+	preempt_enable();
+}
+
+static inline void preempt_enable_notrace(void)
+{
+	preempt_enable();
+}
+
+static inline int preempt_count(void)
+{
+	return preempt_disable_count;
+}
+
+static inline bool preemptible(void)
+{
+	return !preempt_count();
+}
+
+/* Disables preemption and returns the current cpu; pairs with put_cpu(). */
+static inline int get_cpu(void)
+{
+	preempt_disable();
+	return thread_cpu_id;
+}
+
+static inline void put_cpu(void)
+{
+	preempt_enable();
+}
+
+/* Sleeping with preemption disabled is a bug. */
+static inline void might_sleep(void)
+{
+	BUG_ON(preempt_disable_count);
+}
+
+#endif
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/simple_sync_srcu.c b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/simple_sync_srcu.c
new file mode 100644 (file)
index 0000000..ac9cbc6
--- /dev/null
@@ -0,0 +1,50 @@
+#include <config.h>
+
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <stddef.h>
+#include <string.h>
+#include <sys/types.h>
+
+#include "int_typedefs.h"
+
+#include "barriers.h"
+#include "bug_on.h"
+#include "locks.h"
+#include "misc.h"
+#include "preempt.h"
+#include "percpu.h"
+#include "workqueues.h"
+
+#include <linux/srcu.h>
+
+/* Functions needed from modify_srcu.c */
+bool try_check_zero(struct srcu_struct *sp, int idx, int trycount);
+void srcu_flip(struct srcu_struct *sp);
+
+/* Simpler implementation of synchronize_srcu that ignores batching. */
+void synchronize_srcu(struct srcu_struct *sp)
+{
+	int idx;
+	/*
+	 * This code assumes that try_check_zero will succeed anyway,
+	 * so there is no point in multiple tries.
+	 */
+	const int trycount = 1;
+
+	might_sleep();
+
+	/* Ignore the lock, as multiple writers aren't working yet anyway. */
+
+	/* Index of the inactive counter array, from the low completed bit. */
+	idx = 1 ^ (sp->completed & 1);
+
+	/* For comments see srcu_advance_batches. */
+
+	/* assume() prunes executions in which readers have not drained. */
+	assume(try_check_zero(sp, idx, trycount));
+
+	srcu_flip(sp);
+
+	assume(try_check_zero(sp, idx^1, trycount));
+}
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h
new file mode 100644 (file)
index 0000000..e58c8df
--- /dev/null
@@ -0,0 +1,102 @@
+#ifndef WORKQUEUES_H
+#define WORKQUEUES_H
+
+#include <stdbool.h>
+
+#include "barriers.h"
+#include "bug_on.h"
+#include "int_typedefs.h"
+
+#include <linux/types.h>
+
+/* Stub workqueue implementation. */
+
+struct work_struct;
+typedef void (*work_func_t)(struct work_struct *work);
+void delayed_work_timer_fn(unsigned long __data);
+
+struct work_struct {
+/*	atomic_long_t data; */
+	unsigned long data;
+
+	struct list_head entry;
+	work_func_t func;
+#ifdef CONFIG_LOCKDEP
+	struct lockdep_map lockdep_map;
+#endif
+};
+
+struct timer_list {
+	struct hlist_node	entry;
+	unsigned long		expires;
+	void			(*function)(unsigned long);
+	unsigned long		data;
+	u32			flags;
+	int			slack;
+};
+
+struct delayed_work {
+	struct work_struct work;
+	struct timer_list timer;
+
+	/* target workqueue and CPU ->timer uses to queue ->work */
+	struct workqueue_struct *wq;
+	int cpu;
+};
+
+
+/* Scheduling work is not modeled; any attempt is a verification failure. */
+static inline bool schedule_work(struct work_struct *work)
+{
+	BUG();
+	return true;
+}
+
+static inline bool schedule_work_on(int cpu, struct work_struct *work)
+{
+	BUG();
+	return true;
+}
+
+static inline bool queue_work(struct workqueue_struct *wq,
+			      struct work_struct *work)
+{
+	BUG();
+	return true;
+}
+
+static inline bool queue_delayed_work(struct workqueue_struct *wq,
+				      struct delayed_work *dwork,
+				      unsigned long delay)
+{
+	BUG();
+	return true;
+}
+
+#define INIT_WORK(w, f) \
+	do { \
+		(w)->data = 0; \
+		(w)->func = (f); \
+	} while (0)
+
+#define INIT_DELAYED_WORK(w, f) INIT_WORK(&(w)->work, (f))
+
+#define __WORK_INITIALIZER(n, f) { \
+		.data = 0, \
+		.entry = { &(n).entry, &(n).entry }, \
+		.func = f \
+	}
+
+/* Don't bother initializing timer. */
+#define __DELAYED_WORK_INITIALIZER(n, f, tflags) { \
+	.work = __WORK_INITIALIZER((n).work, (f)), \
+	}
+
+/*
+ * NOTE(review): this expands to "struct workqueue_struct n =
+ * __WORK_INITIALIZER" - the type looks wrong (work_struct?) and the
+ * initializer macro is missing its (n, f) arguments, so any use would
+ * fail to compile. Appears to be unused in this harness - confirm.
+ */
+#define DECLARE_WORK(n, f) \
+	struct workqueue_struct n = __WORK_INITIALIZER
+
+#define DECLARE_DELAYED_WORK(n, f) \
+	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)
+
+#define system_power_efficient_wq ((struct workqueue_struct *) NULL)
+
+#endif
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/.gitignore b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/.gitignore
new file mode 100644 (file)
index 0000000..f47cb20
--- /dev/null
@@ -0,0 +1 @@
+*.out
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/Makefile b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/Makefile
new file mode 100644 (file)
index 0000000..3a3aee1
--- /dev/null
@@ -0,0 +1,11 @@
+# -32 selects a 32-bit memory model; "-mm pso" runs CBMC under the PSO
+# weak-memory model.
+CBMC_FLAGS = -I../.. -I../../src -I../../include -I../../empty_includes -32 -pointer-check -mm pso
+
+# Run every *.pass case expecting verification success and every *.fail
+# case expecting failure; each case's output is captured in <case>.out.
+all:
+	for i in ./*.pass; do \
+		echo $$i ; \
+		CBMC_FLAGS="$(CBMC_FLAGS)" sh ../test_script.sh --should-pass $$i > $$i.out 2>&1 ; \
+	done
+	for i in ./*.fail; do \
+		echo $$i ; \
+		CBMC_FLAGS="$(CBMC_FLAGS)" sh ../test_script.sh --should-fail $$i > $$i.out 2>&1 ; \
+	done
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/assert_end.fail b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/assert_end.fail
new file mode 100644 (file)
index 0000000..40c8075
--- /dev/null
@@ -0,0 +1 @@
+test_cbmc_options="-DASSERT_END"
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/force.fail b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/force.fail
new file mode 100644 (file)
index 0000000..ada5baf
--- /dev/null
@@ -0,0 +1 @@
+test_cbmc_options="-DFORCE_FAILURE"
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/force2.fail b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/force2.fail
new file mode 100644 (file)
index 0000000..8fe00c8
--- /dev/null
@@ -0,0 +1 @@
+test_cbmc_options="-DFORCE_FAILURE_2"
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/force3.fail b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/force3.fail
new file mode 100644 (file)
index 0000000..612ed67
--- /dev/null
@@ -0,0 +1 @@
+test_cbmc_options="-DFORCE_FAILURE_3"
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/main.pass b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/main.pass
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/test.c b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/test.c
new file mode 100644 (file)
index 0000000..470b110
--- /dev/null
@@ -0,0 +1,72 @@
+#include <src/combined_source.c>
+
+int x;
+int y;
+
+int __unbuffered_tpr_x;
+int __unbuffered_tpr_y;
+
+DEFINE_SRCU(ss);
+
+void rcu_reader(void)
+{
+       int idx;
+
+#ifndef FORCE_FAILURE_3
+       idx = srcu_read_lock(&ss);
+#endif
+       might_sleep();
+
+       __unbuffered_tpr_y = READ_ONCE(y);
+#ifdef FORCE_FAILURE
+       srcu_read_unlock(&ss, idx);
+       idx = srcu_read_lock(&ss);
+#endif
+       WRITE_ONCE(x, 1);
+
+#ifndef FORCE_FAILURE_3
+       srcu_read_unlock(&ss, idx);
+#endif
+       might_sleep();
+}
+
+void *thread_update(void *arg)
+{
+       WRITE_ONCE(y, 1);
+#ifndef FORCE_FAILURE_2
+       synchronize_srcu(&ss);
+#endif
+       might_sleep();
+       __unbuffered_tpr_x = READ_ONCE(x);
+
+       return NULL;
+}
+
+void *thread_process_reader(void *arg)
+{
+       rcu_reader();
+
+       return NULL;
+}
+
+int main(int argc, char *argv[])
+{
+       pthread_t tu;
+       pthread_t tpr;
+
+       if (pthread_create(&tu, NULL, thread_update, NULL))
+               abort();
+       if (pthread_create(&tpr, NULL, thread_process_reader, NULL))
+               abort();
+       if (pthread_join(tu, NULL))
+               abort();
+       if (pthread_join(tpr, NULL))
+               abort();
+       assert(__unbuffered_tpr_y != 0 || __unbuffered_tpr_x != 0);
+
+#ifdef ASSERT_END
+       assert(0);
+#endif
+
+       return 0;
+}
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/test_script.sh b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/test_script.sh
new file mode 100755 (executable)
index 0000000..d154597
--- /dev/null
@@ -0,0 +1,102 @@
+#!/bin/sh
+
+# This script expects a mode (either --should-pass or --should-fail) followed by
+# an input file. The script uses the following environment variables. The test C
+# source file is expected to be named test.c in the directory containing the
+# input file.
+#
+# CBMC: The command to run CBMC. Default: cbmc
+# CBMC_FLAGS: Additional flags to pass to CBMC
+# NR_CPUS: Number of cpus to run tests with. Default specified by the test
+# SYNC_SRCU_MODE: Choose implementation of synchronize_srcu. Defaults to simple.
+#                 kernel: Version included in the linux kernel source.
+#                 simple: Use try_check_zero directly.
+#
+# The input file is a script that is sourced by this file. It can define any of
+# the following variables to configure the test.
+#
+# test_cbmc_options: Extra options to pass to CBMC.
+# min_cpus_fail: Minimum number of CPUs (NR_CPUS) for verification to fail.
+#                The test is expected to pass if it is run with fewer. (Only
+#                useful for .fail files)
+# default_cpus: Quantity of CPUs to use for the test, if not specified on the
+#               command line. Default: Larger of 2 and min_cpus_fail.
+
+set -e
+
+if test "$#" -ne 2; then
+       echo "Expected one option followed by an input file" 1>&2
+       exit 99
+fi
+
+if test "x$1" = "x--should-pass"; then
+       should_pass="yes"
+elif test "x$1" = "x--should-fail"; then
+       should_pass="no"
+else
+       echo "Unrecognized argument '$1'" 1>&2
+
+       # Exit code 99 indicates a hard error.
+       exit 99
+fi
+
+CBMC=${CBMC:-cbmc}
+
+SYNC_SRCU_MODE=${SYNC_SRCU_MODE:-simple}
+
+case ${SYNC_SRCU_MODE} in
+kernel) sync_srcu_mode_flags="" ;;
+simple) sync_srcu_mode_flags="-DUSE_SIMPLE_SYNC_SRCU" ;;
+
+*)
+       echo "Unrecognized argument '${SYNC_SRCU_MODE}'" 1>&2
+       exit 99
+       ;;
+esac
+
+min_cpus_fail=1
+
+c_file=`dirname "$2"`/test.c
+
+# Source the input file.
+. $2
+
+if test ${min_cpus_fail} -gt 2; then
+       default_default_cpus=${min_cpus_fail}
+else
+       default_default_cpus=2
+fi
+default_cpus=${default_cpus:-${default_default_cpus}}
+cpus=${NR_CPUS:-${default_cpus}}
+
+# Check if there are too few CPUs to make the test fail.
+if test $cpus -lt ${min_cpus_fail:-0}; then
+       should_pass="yes"
+fi
+
+cbmc_opts="-DNR_CPUS=${cpus} ${sync_srcu_mode_flags} ${test_cbmc_options} ${CBMC_FLAGS}"
+
+echo "Running CBMC: ${CBMC} ${cbmc_opts} ${c_file}"
+if ${CBMC} ${cbmc_opts} "${c_file}"; then
+       # Verification successful. Make sure that it was supposed to verify.
+       test "x${should_pass}" = xyes
+else
+       cbmc_exit_status=$?
+
+       # An exit status of 10 indicates a failed verification.
+       # (see cbmc_parse_optionst::do_bmc in the CBMC source code)
+       if test ${cbmc_exit_status} -eq 10 && test "x${should_pass}" = xno; then
+               :
+       else
+               echo "CBMC returned ${cbmc_exit_status} exit status" 1>&2
+
+               # Parse errors have exit status 6. Any other type of error
+               # should be considered a hard error.
+               if test ${cbmc_exit_status} -ne 6 && \
+                  test ${cbmc_exit_status} -ne 10; then
+                       exit 99
+               else
+                       exit 1
+               fi
+       fi
+fi
index 8c1cb423cfe6939addceb2da403bed74cfc97e30..83d8b1c6cb0e54d03b9f59403e7b72acee089de8 100644 (file)
@@ -5,12 +5,12 @@ include ../lib.mk
 .PHONY: all all_32 all_64 warn_32bit_failure clean
 
 TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall test_mremap_vdso \
-                       check_initial_reg_state sigreturn ldt_gdt iopl \
+                       check_initial_reg_state sigreturn ldt_gdt iopl mpx-mini-test \
                        protection_keys test_vdso
 TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \
                        test_FCMOV test_FCOMI test_FISTTP \
                        vdso_restorer
-TARGETS_C_64BIT_ONLY := fsgsbase
+TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip
 
 TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY)
 TARGETS_C_64BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_64BIT_ONLY)
diff --git a/tools/testing/selftests/x86/sysret_rip.c b/tools/testing/selftests/x86/sysret_rip.c
new file mode 100644 (file)
index 0000000..d85ec5b
--- /dev/null
@@ -0,0 +1,195 @@
+/*
+ * sysret_rip.c - tests that x86 avoids Intel SYSRET pitfalls
+ * Copyright (c) 2014-2016 Andrew Lutomirski
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#define _GNU_SOURCE
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <sys/signal.h>
+#include <sys/ucontext.h>
+#include <sys/syscall.h>
+#include <err.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include <setjmp.h>
+#include <sys/user.h>
+#include <sys/mman.h>
+#include <assert.h>
+
+
+asm (
+       ".pushsection \".text\", \"ax\"\n\t"
+       ".balign 4096\n\t"
+       "test_page: .globl test_page\n\t"
+       ".fill 4094,1,0xcc\n\t"
+       "test_syscall_insn:\n\t"
+       "syscall\n\t"
+       ".ifne . - test_page - 4096\n\t"
+       ".error \"test page is not one page long\"\n\t"
+       ".endif\n\t"
+       ".popsection"
+    );
+
+extern const char test_page[];
+static void const *current_test_page_addr = test_page;
+
+static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
+                      int flags)
+{
+       struct sigaction sa;
+       memset(&sa, 0, sizeof(sa));
+       sa.sa_sigaction = handler;
+       sa.sa_flags = SA_SIGINFO | flags;
+       sigemptyset(&sa.sa_mask);
+       if (sigaction(sig, &sa, 0))
+               err(1, "sigaction");
+}
+
+static void clearhandler(int sig)
+{
+       struct sigaction sa;
+       memset(&sa, 0, sizeof(sa));
+       sa.sa_handler = SIG_DFL;
+       sigemptyset(&sa.sa_mask);
+       if (sigaction(sig, &sa, 0))
+               err(1, "sigaction");
+}
+
+/* State used by our signal handlers. */
+static gregset_t initial_regs;
+
+static volatile unsigned long rip;
+
+static void sigsegv_for_sigreturn_test(int sig, siginfo_t *info, void *ctx_void)
+{
+       ucontext_t *ctx = (ucontext_t*)ctx_void;
+
+       if (rip != ctx->uc_mcontext.gregs[REG_RIP]) {
+               printf("[FAIL]\tRequested RIP=0x%lx but got RIP=0x%lx\n",
+                      rip, (unsigned long)ctx->uc_mcontext.gregs[REG_RIP]);
+               fflush(stdout);
+               _exit(1);
+       }
+
+       memcpy(&ctx->uc_mcontext.gregs, &initial_regs, sizeof(gregset_t));
+
+       printf("[OK]\tGot SIGSEGV at RIP=0x%lx\n", rip);
+}
+
+static void sigusr1(int sig, siginfo_t *info, void *ctx_void)
+{
+       ucontext_t *ctx = (ucontext_t*)ctx_void;
+
+       memcpy(&initial_regs, &ctx->uc_mcontext.gregs, sizeof(gregset_t));
+
+       /* Set IP and CX to match so that SYSRET can happen. */
+       ctx->uc_mcontext.gregs[REG_RIP] = rip;
+       ctx->uc_mcontext.gregs[REG_RCX] = rip;
+
+       /* R11 and EFLAGS should already match. */
+       assert(ctx->uc_mcontext.gregs[REG_EFL] ==
+              ctx->uc_mcontext.gregs[REG_R11]);
+
+       sethandler(SIGSEGV, sigsegv_for_sigreturn_test, SA_RESETHAND);
+
+       return;
+}
+
+static void test_sigreturn_to(unsigned long ip)
+{
+       rip = ip;
+       printf("[RUN]\tsigreturn to 0x%lx\n", ip);
+       raise(SIGUSR1);
+}
+
+static jmp_buf jmpbuf;
+
+static void sigsegv_for_fallthrough(int sig, siginfo_t *info, void *ctx_void)
+{
+       ucontext_t *ctx = (ucontext_t*)ctx_void;
+
+       if (rip != ctx->uc_mcontext.gregs[REG_RIP]) {
+               printf("[FAIL]\tExpected SIGSEGV at 0x%lx but got RIP=0x%lx\n",
+                      rip, (unsigned long)ctx->uc_mcontext.gregs[REG_RIP]);
+               fflush(stdout);
+               _exit(1);
+       }
+
+       siglongjmp(jmpbuf, 1);
+}
+
+static void test_syscall_fallthrough_to(unsigned long ip)
+{
+       void *new_address = (void *)(ip - 4096);
+       void *ret;
+
+       printf("[RUN]\tTrying a SYSCALL that falls through to 0x%lx\n", ip);
+
+       ret = mremap((void *)current_test_page_addr, 4096, 4096,
+                    MREMAP_MAYMOVE | MREMAP_FIXED, new_address);
+       if (ret == MAP_FAILED) {
+               if (ip <= (1UL << 47) - PAGE_SIZE) {
+                       err(1, "mremap to %p", new_address);
+               } else {
+                       printf("[OK]\tmremap to %p failed\n", new_address);
+                       return;
+               }
+       }
+
+       if (ret != new_address)
+               errx(1, "mremap malfunctioned: asked for %p but got %p\n",
+                    new_address, ret);
+
+       current_test_page_addr = new_address;
+       rip = ip;
+
+       if (sigsetjmp(jmpbuf, 1) == 0) {
+               asm volatile ("call *%[syscall_insn]" :: "a" (SYS_getpid),
+                             [syscall_insn] "rm" (ip - 2));
+               errx(1, "[FAIL]\tSyscall trampoline returned");
+       }
+
+       printf("[OK]\tWe survived\n");
+}
+
+int main()
+{
+       /*
+        * When the kernel returns from a slow-path syscall, it will
+        * detect whether SYSRET is appropriate.  If it incorrectly
+        * thinks that SYSRET is appropriate when RIP is noncanonical,
+        * it'll crash on Intel CPUs.
+        */
+       sethandler(SIGUSR1, sigusr1, 0);
+       for (int i = 47; i < 64; i++)
+               test_sigreturn_to(1UL<<i);
+
+       clearhandler(SIGUSR1);
+
+       sethandler(SIGSEGV, sigsegv_for_fallthrough, 0);
+
+       /* One extra test to check that we didn't screw up the mremap logic. */
+       test_syscall_fallthrough_to((1UL << 47) - 2*PAGE_SIZE);
+
+       /* These are the interesting cases. */
+       for (int i = 47; i < 64; i++) {
+               test_syscall_fallthrough_to((1UL<<i) - PAGE_SIZE);
+               test_syscall_fallthrough_to(1UL<<i);
+       }
+
+       return 0;
+}